[Unrecoverable binary content: POSIX tar (ustar) archive whose payload is gzip-compressed data. Only the tar member metadata is recoverable:
var/home/core/zuul-output/                      directory, mode 0755, owner core:core
var/home/core/zuul-output/logs/                 directory, mode 0755, owner core:core
var/home/core/zuul-output/logs/kubelet.log.gz   file, mode 0644, owner core:core (gzip-compressed kubelet log)]
Gz22DT%+@ώzf#T&Ɖ +I;;薔V< i4?ZXKx7E7o$a$ !:y&;Lհp{=j<UƄ/~j&(տW 0Eq<Î57 @QC CᆱHWt>׾?MIA F/?9 '{1M@!?G׫ גU3;|A^vFZxk͈iV>cIi@vɍͻp`g"nr:𲤧簣oM7z@M׸,{=Uc (ZH>n\?Kn28o88=tFa;ZqPÙNӥ\Mgp]-OPœ 90 "$R.1֓ Z/ɍW0a(%PLJb1 WqUy쪃Q_mc`~Xy1Ja:Xv~&d>n>PԽKuHťbITOUj\ڽ'wU8yeoR{pƟz7Vp\2W3ZMv\q DD.[Xv׃w2y{I{̒QO܊"Z㉽"DO6#UU{_w,o6zi%Xc8tOK` bK|P_Y[/'ѤJ^ӊgB_C77ua*Pt9\=`kNGӋ_0ǐk ϯGvQ1qTJsfFZ8PVe@V:o $y|ѫ^Uܿ%>JBۺto}A֓N:h[hlh{.{ PcxbŸj`W1XBO-3js~V7W`-IR 'Es>f53ߪPsXnJ-lJ J6Y P*erɗBQ_ztIxPRK]TZE>ؽh7,1'qoy[5&k\&>ɺC?1#: x:Ϯ?ٴas/7gv1TJމg(9ze;gAS-{1uG_Ȓa<Mzho,ܪ>GY?LMĬ¯/Dct"/d:HBQ&A~{xj }5{rѫX LF}~(2Hepw%Ƀw?~{%ͤR8Jb.vQ+kW=nڐEQK/3Kj)` s2Y&_, $9%V'"JJv*!R;9`7uOD\!tE\COE\!*sWs% +dK8܀Z+kى(vU k+oFq__{ sUz;%9l~gZ^+"sF!%g5>'R3^(j2D!8'ܦ83gGvFP.[} ; 5YQ}''})^9ƍpZ %6a}TظFEL1ISsqmݹYu8\۫SZ Vk c1ϟҰA? :+AYQ67ކٴڼ^ Gwu xmI^ǖ~ؔσ- U/Ԇ,5hjQeZm˴˴҄0T'e%(jT*R8-K-%qdIЮS1LYHj&#ZE4:cV8!"2NM"2 {@,bHp+Da7UHOJffڔӃQ뽥B$2ALye,w)j75rve1U Ix8\UDߺ\a4ڞȏub6 ha_xx5luU^`jLT%cSxGY5&)mv&r+iQHf9v_Lπ 2xkGJ e{0=._~lV.7ܺۦםq0J0hݎ}λhw zѥ˺rT/r@[o|ɚ_t$}sޛ}%J_\ !n%F:ѾjuTZ+u A^Wk\`saܗ)DZheNVuFtyF\5c2Q`+ذ9I$Dj蚃J2RH3A,,CR3 mH1a`3^dU@z&('@J疍ȹHo1b?}Y*UbyVZ&%5o^چzMcZUb:}U.nV|edX5V 6ƙRpJ0,yƢI!C*yM#U5be&@ˈ35!1+D0K8 )YhKIT ڳdg0LOftv.[2R-iTr_ҝFǴN22pI+$"*)/Lq cG&)`ыN{~;]m-̮wur )BГS@r=8VңB*!Fc.|in" ϛxBe:l#X:%eSzC)ׁ]N)iDS` 4$aCka,ecm2γ̬Uʶ>19m}`!bǓ w_zf񮛡A!n>T٩*g:3i8iZ\7%7(trpJIa+Ab.5Ke&3˃o:RΪd)E_7u ggOgQAq*ھlwZ=;zdgl;ilXoSԉD0͹ѭ[ЮaH諳#ɎΎ WwX\Cٯ4ZP(`4q zp[kA #.D'1n>\J߁߀ t?g~-E$Cqx_0 fpb21x{t/R3E)L=U|jxS]fX0A2ln^AְX*Ţ+F4a9.&== vO8>;(zvIKt:E0$đat=T*)CpwɓqGr#=K~[L-'EfDDpY`~*HN6]ڮ35[ͺ- uDmuNSNdKx !6prp2xӖK8ECI)#spQKOiggRSߪ˝ڨv08&0߿ZY{'X΂aVb4Q]2_!˦Cq·WJ2ɤ'Oo2(k099FraT0bua9 Gz?M w_ap|gRM9+}*JCAMȾi:7tmix$>y6b떋YmI pDPp$嶦)&h\+yumj1y6Fԥ̒gV,5ƼӮ?~#hsgnjՊgsۻ8!8jK$XYmXQ1:88{$ {XTR(sշueᅖ_BO x6(>yy;G+NzD#IɅ2 B`Z @.8ţRm7|heKYO`#O;^?Npedァrk,ARjOJ C)5;"iٟ-Q:g\IesŭGDR[XJo] E ŮT5np2rg bH` H InrftWSnj \kv7,BfJF%A)@ÌKlLX_i$Ŕs=ѧJ)l0’[9Wklhļ#R"%1dNF03)e N+Jo l RMí 3 pyX~yul< V mX0"0sx 6AY2pwv10I) Bb!K% Mid:ɱԖ6Riw1A3^cKr,3k#ɘBa%r|eH_sWkgԗ\dGɍdR$b/Qvx6" VuPܳL w]PŰ VrǙ)|vA+*&8*E.W=DbAzZ}a,1mV9h֣t{ ͘|/Ga?%LB ;%6kIjTLc4iuV%݇ Ͼ5o-qMdkz >(浪'EXHO|0g唋eA;"ɯ7J==KN7D0,"{w$hNmlo4%wz:MHqJ H"-,'$`"f$%PLl1^)?~)E٢\'ӿ9ӹǽQ= .5`tR/!+owgJ>sJ{ԝ9CEpNXX F~ #S;m&Wpx"q.R{ʠxݫa 3'!匵@m#7j`7HEn^lXNYÃłI Z!x[d 1a=_Scи*+ .5+FC@p:Zzs4hުȇxbȇY"/(wè\{г.^b b_q(/\Rks~4m됀6J!۠LnL )&zK'V}ˎZq tЂ[?YEC#d:P,0"M Aa7?5*1h]F:X2 _q6*Z92ITbZg#v ]wtQw0N3M1H%7ih 5(O+!DDDk $j|! 
D36F̤Ek0!3M!A#g89 +B"9&r6)F^Jzn0:?'4c`bҹVpb#VCCq5x*tjNQLJ[}M)mޓk*R)^@Ӏ9+S,^ӋsP~\jvB⺾;NYꙂO/wkC]RA'eNRIw/jZjm'Es+Ivn?ac=ӖkkB5;,xbL[LܫÊUu;V;wOPa dVj"=I?UjZؕܐQu\85y(e"D{8nE0'uɛv2 #;7/ߎZ&6K܎J1gqJ%[ŔS~mYRO-3OxQBv..$˂I-ۡuE4ܶmpIM){LR6:YRfUiaRN.nVIdx2,Fٳ/0Iu0 aOB)g2?H @.ϣXz/2D kpAGYtKktÜ:`g Pހ*r{ɣ[\ɸ۫KFeJ c^JSVpɤ^V(={0R5L*9Kǒ(6IL_[S>^͵Y/`(2!scɑwx"bcL7Bk/~h[p ` OAss}7'(# g2 D{Sl`JG)Z8mrך]^vL?biNp>Orۜh]+3c 5+ 002\K| 6H)Qme֎W?MKlH8w (v!lo ݹ2ɕ)u`IM{BA P[&x2dnq968*g쬘M|~Ҕkm6#٢3$b%Xt $8^U[`cVd,MoT]l%v$t @S,G9FejF^_>ޏ l]Ac1 b\gnW=S\20M6@ET"`.`͙6$Q' vt3t|qk؆ZlՖcq"p, LxTwc-%2TgRcSyD2Y+냉KM-a)i% Xy$tc"-Y1Ђ>mwZspM ﬛&ZTVK*| 3) ڗ"Νh+K5v;J2PڽihM ujTMrnݟ:iWnٮd* Vo,wQS ZUe)^; פ2R &uD9VEy &zG;A[dٷ~|->׏poc vGntv |ݜ]DlwH뾐sEi$U] Ѹ?|gTOc>3(e\q^wfb078kSylz :9륰w[JH:~BW]sFWPrɮ!O{TJlj;.LD%6ᥓVAXvൣ֖Hd@RrpY^jzgS:kM>qkF,u!1ÍtdOAGo% ƀRo(=(Eh!9oC>xH6!̢+%}1vg[kw)1(~F$v)ʤ?‡*8+` AD%h"-͉247"i ȿhC%ɑELk҉"ڗ.]͊gn xqV唒mƋ5')&'7jNrܾ9-Lo9mztBz2hrJ$s(|)=;W?(VysAGE$# jwOE"ItF) (RBdZGh,jV)%ֈȣ9>$(A'\* YmB#U2cge,UbqS[[lG''xs5j;Mލ/b$60Iw%"H!Vb'Bʔ,(ee,,q| ٳe4M&I5A%F E1tRln2+.Xbұ+V V{@k:^m EOLRkR\E^S-y£=np$i=dDы,f'1DtiK1w&օ$TGema{Xq0E,&7EE,v:dbt=SPV&yѩJPj"#@TF@H|g`T4e;G%(;)#tQ}P"cgE#jvq^wʵKkŤvQb]\]!HOJwS pvD H^hf[^PIqKB) v.bұ+Pށ ;+|=|9rjE0&x !~B vI[mZ%z1RwA ՉFZsFBmךZ V _FV+Kq= [nd26fh]!%JnRzRLN(y((zRXKXknP b=I:e%C]- n<^HDFrP9%_*#!d:#3Gv)Eve'>j`C3Cn@']nɵ'M\d@h=L#Sxk]<>|ѳ1mGGnlG.L'^|>x3WFdok{A=;wܗ]#앻n׋C_\v}/;i Ue|s}"x9pb dž@ٯ겙6&YjRZX 񠣑 q!(牖[i$KJI.97 ^Oo~.z׃Ր}Y@zFnrL-S7[`λ-ÀW rnUU~,VE9_B~mf۵nw^ NJJrd)??I(NoŰX)uwW ?vyإ3>g?v޵4,Jnǥ\LT'&ԥg,|38QKrH%"/Gb[a= qo3 equ(3KF2!t Z1v AvIS_\SP!G=qn Q!/"r]L#K4K"$$h``ئw6+z!ԶdX-BiA'LtC@4*Y &3h^5QO~vI8$x 2JIB+aD=F&D2AvHrq6 е6řڻMч~ŚŠav/rjIhc_dUG: z\f/LK5#Wr@P$'E))˻4PG ez"9#qjkThA)J̢R%OJh-f g}Yo9Y&9g~ y4)č~b,ɳԚMUA_Q1C;쨪5^vݛʛBǑ?h|EZTj ^]{~b mnI[#ȡgD2P#E{Z9jMc3^g7;H+Zd|r)N_ۋ'8pi8U_.&ۈ`1;!UMÊ]TKVJ:][jKMJѐLU0y?bxXiM'E"?RUk\LPhXV6wue32CȠR,#NhTlJU7I713@R6(czfl $ TR$2$9p.>Q~Q{$ZW)v6/X/dm3/1s^yЌϰ4b}x| %?S_i9y;^)JlyGU%jYR4" "~窆8k6`؄[^yPݖ+Ayp88H "R4m zLʉ9N1pͭ2+,с!l.I K݀9t>` HX]eN}(ݿßzN9 x̀bT{W1$fR m'J "{p`!r'ƻd`*!hBP=>!p 4)HDTCx8- Ue# AĤH@IL@D*'RXz&́1xF`}ublQ7X-~qEJ[4frSkl\\uzE| X # WDT` jOIVͩOy `zzM`y4cI2^)w:)yIhiqh$Gk30N\ pQK-䆅+=[N''[~ <((z"*=o:Ja7[;h}1RFY/B mb@Qimfp٩f{& F<+p7* <ѵO$$`i'n`JFH{h` =!R&>O7} ׽'H\gxwऌL䤋z(zy-%b@xךsfx%.%L")maP_VcP-?2q7bH? . EN )>lEqYx6-\G yY=o?>DV79̄]B|%h詋2[\HՅr:|j}t  nu 5}>mz.x}چkn>G\N]v'9}t]GWogc{$db2;mj Ox1N&~;CHEo&NVG('ǗBl̟ #k5af:_57/y|UߤLdqî~9Y/V=s4}3^(vBht{˟Um>ϛRfg3?>;# d أd8М g__F_#7/*?ͅPѝߚWl j_bCInptВ6O6Τǫ 6Lɜ^*g-6Fmav7.k3sm)^ftu 491ˣl0hm"h?Mj_!wv[~QwydPT~>Nrisb2;8,x?L~F`u<V[dmr W6),~|YhC1?!y8?*߰Q:99 Փ'sDDͅUTZP-'T85O+}79=gBǼTs6el{IG_Vߏ'Y??'X&R;p俪b0۩5_qwm|.ߡxOpڝ1OFi=f>wBF*_]Rk}j#IJ_n&v qmo?\8B’o~Y-!@!J #@_YYǽ2A݉[&6>+bIF=MN;pWk=ѼSDp.M]tzrtYl]gW(z+S=LY,Wz1%\1a9Pm2wqXhBK W{gvԻԺh 1sM,a!H!53}Xk90^w1+N"ھi( w^Vq![RٗԸ^I F.1~\fCPq:[9or $NGn,'SEڬB݃-' 5aX66Gb\Dؗt!K.>R~22rElx59yof-r}L<L2?|R޳/ RNm@ 2d?2EO[1f{h0pUr.a>]"Ĩ?"57vþq;?or_]dnYߛ^2귿d9s73V,&ĿdG7U3WB`D$Qdlxx'S%4J^)$xt-umK!5^;.Q]j-C+ƈD =0VQ" G)ciJhwxwVxtXb8/_ 1%>nYf[07mwLh^z ȧ9ȩv_/NEv2trל^IJq~O*6O-2o֟đR^HOrVnwخiyr썹24A}ttz13 x1ʏ Q҉`ΖpyV? l 6Hrd!f!O Iʘ2x:%B`suXHcRh27]9{|-5BA˕`}^NAu0 `ĵu 6!R'L@C20H^z['&y GCOqϺ:+kL Mi۽K@192wJ")@65Jx4溋K0 3o/Sb0gHUuݴ-:={!gk[0lx)GK nՄ}؊V8r#z4rTY&(P! W6i x*3L~YzF4Ɗ{<_AW- v`@7޾y(n߬9A# %RB]ɴQ@Bk351mmCn\fK(jьmF7 zI1,(TKL ej,RC"u1X8ZY]Ęɤ !>KGpGm`*Qfs"WE@M$ IwlaY5Vޅ/W;$U8;f m;e ]? 
́BEJ/'R^KQ+8Df'gF ,bQWߡ]Zr]Os-B+3FSw2)F?ߙkLhY:+m"5VgUm]Lu .нM^dU{Up\>QAX+* &ç`sQ 6]e#BZ-wk_wm7O2,':u[V{f\"Remgk`zݘwcvbk@5.$X;cVX-Tx@$由"WWQ8%3xC-@r@Y"Fzٴ,+?m:_5uWTFzݟz<9mʤ[vSv2c,whQhR>& aUWw hOȽ7P x5%JQ9Gr JFet1yOrޯ2q F"E$\;Jp TiX%c9RLMe!e^9EylˌW쁷-[L&0|j7>N'o\bZIZ-uփ2*%N9sb*%G9xY, O"wFcEF,6A DFж#&)@"AwJa\01wEj]ڼ=ٌkeh)d" ZTkC0 a q<-܂p$vIZȈ @+:pFb40G]1Hՠ+, ܋mN9f^نQ4Akя/(6f!Fi[Ul_ejuH%o[ QQo+X{JfѡgյLDԂpY;F(]ԑ|.OL{%Ve㜹KU{csPMf/r Vth{ȮeE92;'#̧~| N0-Sdg2PF,7J5%Q T68KrI܄uj}ȵ/2&6d<.Halo<.\7LPqTJ{\^DžD6vRn*뎕qvA<7!1ǒ^ooƣi{*Z tȚZx,^vh 9U^_s>r]rz:hJv&n\|KrAZ]|FC(otg`RzPO \ C7xa3^4k2S!?Y)KL{2\AԾL-绎22( tĕP07*+L.Jh/^k2FD<0:? 30j NHO_ \NG ?GtFJì?{7+bvbw?Qj7.Ůp:w!a)峁̮Wȭ8v\y\ڈѝl! 'Ddi*mh:ٲ}5PF#׾->un7n:v\ `t>&soUeBgus݉=NmUkyn$Cr=*'C>c)sy92R=}D?Dӹ6N.+M4A ΕFZrFBiךZ VTnvo(je\DJƘ+546cl r"lTG^;5ުQ7\q(. gN4",̢˯PT+'Fr& j&U3YX3ypb ܇Eu/e @#,J{Urtd%Nyfb`k@ϽBfn<6yXtQ'`A& %8Thţ$Ƥ8fMHnki,˕eY}:j&fK lCi糋Řz{>/3s1<^f2-׹J$**'9 ,HI\~;@u0n_{o@$52<:᭶&.iOuTx,*UĶ~-vv11ǓhwV^gE>Ţr_yOYjénݳ i 93T{( QTq)rn*}I)"yyG=FfhTJA|ktBbSr&ƀ$2@RREުɄʘIB81c˓\xJ<80Zks R%al{'"ty1\ gUH :OE/|Z<ȰϧGfQA-[4ߏewU<^yKNk] ˵Pm)U- -AZJH:!6u kz Փ(>g|X!Q R>穜;O *<~\9=cSADʓ64IlkeRνwb[eVX ݛx\mJ_A8`/LQdz帻)w-/C 3Ǚ%$Tt'&}\D|kj'*P9R;0Mm6ha`39L ͙ë)Z~U3*w~[qSë^dˤ+ X G47L"7"5rB<>}:O;3.B=!׆+~< uT=`H72]*=)&lEq8TƻE7zi!g822示~EVW9̄4M!vI=oNdHDDLmqb9j֤,g۱nTp\7-sq!nû4Ir*z]DWo1\LF*73y=UŭU\u!3!tjB9zq}I)ŦUs!FSN}K޼_oS&2iW+^_73( gןRr{\g:-~e>۳ꜞKUur&W$PP'Z^y44ٟ"͟7͛ ~Pv9G?O)L{_W$Ph*k ,yNqh>^d.W/Հ4}%A@ֶ S^XN8%ףlv1nɆ8hQ1d5%|Ơyƫbq<qGd9 ƽŠ7E[,!.ڇޥ=˿*~1akE=SM_&dVc㏣qCOIюK^hXiIADgYfq§9!̣-oq{o?(P|]QEĿ*AK*-p)~^z gat}yWVi=]t5\g߆y֏0.Pe(Ed@T ݲ^q5benfSq۝>0Ox0Ӂ^<0 ur]L਎Y:NysRȔ>0KIU>P=~^) )oM1G6 1ӊD-cexZD83F ⯟/Lӻ7m*uy! qT IpсfJ=B(*$=,uV޸N,T~}-W#tH{ S ɛUoBQ| =\>j Twi^@*s mM6nx1S_dNvؿPvc3sQ%6 @OSt3EڳlbRRHPmJ/ҮmS.]o9ޥ N#*(ޥ ޸THjE0舓".K5H * (i0}WhڵzU r㦳S9B..tzK׆0ŀ BnᣵE+=mv7XTH,y4Uܫ4pKpV[hQDNQGܮh8p?-=j IH chg8p46%Bq%as u8Zd."U_*Xi-?Ĝ8H>AaLYCLvr=_t뤸"ej߀Z A!լvq:/bIAqM7q[zUZ}F7Xc](vCVݼ?ug|Ì9rgCUh68şRmsֿσozg2or:˒䜝F^MOCu !"q׃~/צV/B-lxr`כf4:{9F(z2&'J24N\Lg]ā)?#rSK"Eg#19G;HEV+v5>ȾWһu*&<[-ƫ:㡰g1<8ё#͈6׎.nfY|1\ćTo$- :V? ^%C%Z?JS1}T=k^%H4u)QP$aEϹ''JxMc+W} FjX?d6ExKk;^>y 5#iAO/sGYZe;t|BYP}TTmsH@ L>\ hC"E"RHxb ')y`NEkClxXSsu5ϕKQJҡ6٘ɍFP*JJMd٧ȝ "i$43vБxI~rE:ZD=_?,d_]+g^!3//pM>4xl0t܇><Z5ؼ]ƟF+2?Wo=gO?ً=pU;ǭޅZ&@e7\<>K+`Ž̈́-/"ttX3z]ږ76X{UX^y\z(.M~quQ"m(Sj.t+wL{8 e%2UH \F[ *BkarP]_/pȍ>R`oN_-Ff4|Ή6qeVV=Tv>u#W'9/V tIj׆nwYHK^>cQ)E?*9*!QZɣev*E簞gǃ"b[YYeB }_ _Hjtξ=.Vkn*=-~6(GK8{w3v=iMMzfY$_R:Qnթ@ˣ[!gT 8)sVYk=zXeJȷ$_nٛ/ý+vp?Q5gN};5?^h5E;7CɗY΢]pDV&w㼯Ie8 ;7<ᏵY!~-G\ٸkO)' Vj=^%/^ˏs =L?|PW>)~]yMRۜ|W2v2Vz9~? 
%)G@^ħ(/d;z_?wzj(۔>z׏H].qf I ShDzg:k%bQ_3i'7`yJ9(^'>]˯ >uZ3Q(-Xa!ń+p5)V~62y}oMi d|˥=i2x{ ߉>N4'upO͕B2`705`ȤeBFWDkDZ =Cxbo"'Jhs>ڑs7v|;ai>w f^FA@!9c6xQzNn15!rg bTmAL&ʌdrB(D"tJiXl Άg}f/-$Y&ysyZ>_Y=X|3N_ՇKhhp՗!RBk%èD3nbi{33puթ@jMY6OTSLZ\arl_aۮG]A`{; _1*ج\|5Q; $RphFƆToI -꼧|ꂻ9[˯ogYew}iwLx5#iZaG״_ *oҕشxQNZUci[X@(kY9`GLԗnQ/݊噳FX Zi̅  3`ܣ1oQz.S*Fi&ruq@,\"Q sڟTɆwZnS=҇6z}SwKSi9LJhGJ8Ea ZL xILR-B u"i4+D<NPDpJη'j|?)7Q-¡8.J7519ٵJ3kJ3Au6RX O29+C$hegZ*+HO碽6&|TDcٝiˆǮ 3"3":dZTYFmD*eW8195fc^M+mpjT,eqy QFI( >'dmA뇈CE=ڴ٘ʋa^=/6 = (eF;#@4hQ-`QE & A`I"mƤ-|(wP"7mU5N8dQDcdFQ:=n{?-Y*>n_:#žq' ;ܟJ Fy |/ 9XF) L!m LZf-K뫊Vf(Ԡ?6k(7!h8HR0IC21)#knuŀƥouhBfs%ξϋ- z(& .nZ!T~:X!Ab6J 6&(AsEbI(Px07lJ('7Б>.6.ROW _*qT*Iԇ>O?6,14XHIVXO A'Igw\+K'N[ZZN3`AHJ!HD#*_T#!>P$\79bЖ(wg]^֡l32L*yyr܁\ m:q76+Bέ^}:ޓg ]eEuu4gՅ[6{}n廛o.cAKwxEo.xoz֍?pVJ{/qӢ9/}:uyɃtsy>wQJ˭͏dSt0vpiW gpF)y_d KF\m|+f˫r)ҧM-Rk]}DI *)XEhZ>[^kkQ-֎Z[" k+"u-G"P#=dh^612uՎFx"q!o牖4%SZM6'>۩KY|ym֤2W)dCRs_ė޼>y(wȼFם13\0]13Z)n^g ךJo;DW1+;UFy* ҕaR!Bt";BWUF{vJo8 -K&j+#vp:.]mVB t{uu*֢3t_8mN+, ]!\EXW*v(J_:\vgBA|-Ey ?z>xtZV='`0 \s?3x5ۼ{Q )nw7.τiԎ^r{(TXO(̶c>_ySZhaRb.}1:~Yg -ާ_F> `zyqs~sYN-7!+"T'p™"1B3!R퐖h]2f72ʥ2NG@d  :CW.NF+ZQ* ҕv!\EIW*׮2%ӡ+ J@3`cJBBW-m?!;IR2J:DW1ڣ9B2J&z:ABp r4EW%\BZwQr ҕ!ʀ5 ]\3{W-oQF)xOW ] ޜي[fplcp;C`;e[f 2=]:TJ&D K;CWWJYJF!BBg)Bt-et(=]"])Esg+FP{98完R}z oL}Q},>4"hh9PUDEam ^ؠ!,%fɏXb G &%K]/{yP }.5k҂Aބ\,wI!cS @E3ӥ$,uw,3^tn/ j$Di-@I ! UF tQ~#JH&^&(_o~|vgF5 跟'geݔ%TPJa "[ [U{7xF9:TG*&61l _Xg:_no }4F,GQ/_Ue6@6x'QGO>1'CM'1%No 2-ĩ?*hG%G4$*y@6y?u<'ۊYL%WKeJ7|DȻ@QF .a]ۊ9/qf^oeiݟkZS3h[T4lKCSF!U-~3T װjBB)jRs#iJj @;CW$]+D?]!JhOW'HWJ+Mt*Ѯ2\VUFixOW'HWco %;nw*mxMFهל$]X  ѝh WɮNW%9#FQutcp;LyjKG-њ]m,tU.C:DWX* ]e3thՑ2YmxOWHWʥS6Fs᜙3&!ٻFn,W~R6/4# IKƃmm˒[v{YzdU.4Z(a\T[GaҴIU4m M#\MT[h:=ti:wHhIz ㋂Jq='a5;iS:\rO'>Io/h7=Qn =h"Bp?+M[*5t()!]I`GS[CW.ohU㇜3ʆut6t@*&gkpi ]e`FY BЕcXAM !)p)o ]e*T{+CSEthph%k|dF)o`Ǫs|@OOW{'WW'vC+Nsxž(+ AW]AGWV=P#l]eFhh:]e ;+f'dC{s{MmPY9GaQzKBDJ(Qw8~%S6;'Rgm3Sérn~5;f[w^7C{ WfVhoQ2f7㜂-+Li{2\ޚ[򽞮2Ji:zt%8׬Mui]!\hm:]eHWk݀ A!\CX[*tQVkGW2\hM\AFy*lZyGWoBWZH"ۤ0Ai]tUF+Qn=ҕF*jZCWFBZ3vgs;V=='3zzgp?j/cW'j%kVt]Z 2Et5 WUFOWQ]CpZDWXpp%m ]eڋx/hGW S4<)(rSG)|ͨ1Bw\RLVвr_\Tp5 gg3MbٔAQyg|_OT8J80A]>0O_)OB 6D-P,p!yT.Iz 8c4T_Ѩ8nTɢql|^}(]炏~y7>ad׹J?tZ S:2St~Al:YZgb4[Q ia-W@Y|u1=0=!X8Vtwrg6M8 ^$%ډ?]]]^X|Ϳ_Wz#첖19)Cuc?ޠ|y,xnD(Se xa(юOy~a]2u71pϞϲ+ RI.y'A?}԰{YW΢mnP3sK<^ۛ̊6l?ya&5?j7G;^drqoG(/ѣv_,k~^mTX/鋙ߍƽغ5ϚO4L e۹bTjbg?  lG =pHϒ!Y|gRY@ggx/4ԋ\Y\}Dtdr; K8=NûrF}c<*^:vU:%,S{{{eIQ[5jcM4迂PNjn]i((l_7sŧdjPYAxUE:-նF"BNePYVtypU>uU"}UTXe'9Cd]婳FG/Rk#mtkjAGc@Aɴ,^T6F)D; &80sG`T؄E'r;1gz5b#e]uLuk=et ~,z5`<>*Z|n7|g7lqD2)!$GJDba ᑛ<2(-BZjDJ@b HG4Bԃa#Q t8H'Sh_sC ?b4 .}Wϵ^*Md%|߭ws[:kMM6)G:*O/3mrRNf9%<3M{uf+߾k)+t>*"QI< `Dg/%Dƨը{ZQjBHΑ<%F@"^{-PBGA'\* YmBk*qamq( [Ņ Dl/VC̣lǏc?׿iÇ~o<=hT&:1J(EE&%CFʔ,(ejYYg%9hM&I5A%F "WfƮ vvAMaڬ):&[6'&rj @)"y)by«=\"V>LB*CFehbvCDwFD Iѩښ68tq4ꅘ)X}ʈfF#vב$+ C虂2qۈNUx`R&0/*$W+# H730I*`Aq2ϸQƼE>ϤPE9fF vF|Xyq\W:kCyQ̋Ŏ7NFғMwD H^hfF-/$%!:^</6k<@a2?'#bzgQO+OV%rfV&rFQ=E?*qIZ7h RvVLXڛ|u.'QvQljfXDaUD[nJe/FYq)dtk ))9ӞSE11.DXPq*P#4Z"@Й&b^S.U﷎4m]gIZvGƭR^vv' *E <]>$C(1L3#S3\ 3/3Pfs-kvxab灗Mq9шHW;6e96&S[PNtۻG غ3;x&4<[oǚTTh p-<EQJ )=)U0C!$zvb۫6hڈ: XR-.:sNIWؔyr( \K]13lM?mGߧVf1esbzs:-.lYf) L9R=MEOg> a!J077>y;z :f;n^j̷d<xynYV3DWgi4o9n?LC|Sq'AKQo^U/zHex[|:-.0Xa?/6^.p'8OT)R"ON8S$*ԩ;fawKZԆal\ g+@d"@p47%,3F+Ǯf&~/-&&wë5>i;[8g}~gIf_,o4Vݚfz'zJ-&*b~W‹IpNx.xpBB`4)(<yЫ_]C2H!3)\2%ƶ !2HN3aΒZG@rGgJ6d֐>XMkۛOk#{HQ$҂L`~{!t$FA0KmԚ3LItOU+YvZF^6 alcM|\SV`\4smqXyɖ|-_ >oޔ},EnjF1D'rTY&hKC9e^ ^:PfXMGmoa\Me`_C0!:0h9Şy~7eߠCM`w+VrRR˶R)HD68u0z^gĸMEf^(t %\(|W265n3q2UQ^%! >nf8=VkDhmHUKIn]$KufAU 5Ƅ̼i msI ]Ao$rC.)9-Q޷C;! 
~Y壳"AJ ۥZ#y6J_Rnc+92/N|&:e"kNS.w!N݃k-נ4RLLdެtt1`XF[kMNRaa#&%EERT%z?}3)xwyCJZmμvZ_8tW8fvOɘSGSEڀ6eZshe@JC˅:DX ^[   D(jU6G2r,F.Eg1z$a:w% }\ lpI?NAG4:#vH4'`A3Xa,(N$P7,WdIh-"}F0!t(cR[dv;Y =|kcmPYSm!M܈%6D)_57pN !O+ȃ sIc+`]0qYSP&}I$ʗZ`x1)MJ¹n׷뷰mjU-m( ]Eԍ-Ecْn}=|t[,qEi/57RWZ p*Yː)%K)E7u%*e ƅp'iCIf' dKCoBt<`M1m?8yݔw4qЗT{8SRǨ{Tűk0{FBw[N8w{歛_:]Q$nM_3L{_MWSTB^t/9fN9d4ZTc5`M1TIBת{(>9)9'< 8^t(7Zwn֜[37L=5YއTgexU%08LC|b0˯q=-a?z a>| UqGúVѠ !a2 ~*?T&X1.T/5k.)UDoQh.|(G9LLx; kʯouE~6rjG/G@9o,&Co}yy~Uf85}v1tӴh»LAyp'h hNXMqciy>@"tMA)u*w小rgZ*j>yj)3Rj#Qi3j_tԷø }$oZ/&u"bXXcddUyFb%< XCHbjC>,+-o~SzkqƁN@_M*x\Պe-[U;u}P ]_K4_/v4?Tݺ'IfKŒ-Eb4x,qa_]vmf'oHf'-*Z*xjMff]MQg<1SŜց2(Ȕ1CihamW1%. h|( 7Npol99I$+)9ZEˊik\3]p]i}ٽfYY~]15)O#vqDϨ0Ki9Gl]lQ'R82hA9aAKFjw#m ғ!0.Yzv3xME6)0NtƐ[XIzIym+B4 h#@?>(Mэqa>I98D'ϩdUX5%ܞN;9Fu5jzP&LL8aק]Tg}Za,>x\[n6%5J <\O7g4%./lk|?U^*Nڃ2x C/U3~~FI-YA:Lb>D7#^Ӭ} _& .76A?ӱ YJ]Ϊ<|XSB}QW!. zEA1 ^T0hmX6ob6} /KuDgMqFG {t#7&{ZqPÞNӱMg w''90eb]"౞N\@j$7 +Gh\)Ux9c<UQŧU.c`~Q\p;9Xa`_a|vPԽE7ӏ[SNbGz ?UqepU 9~6\S/7OZ YjOYd>W}89!cEU/Uβ7zl#WB`)˖Ge2bLbgZt+%d*"9:-bSV6[!B&I פĥ*uBe` $qRA@X+\(:Gs+歊|h'6|=iڌ|XηQM[K|ʵ6Ec,CK0EY慀?Wr݆RYnJ\ZgL)Oƕ^ )LSw7?lfHxH-iXR 54&b轔NDsJAh, 3Hc- ڨ<QJO5h)߲9רwVgFa%犺[!{8_%ۭv}]+;聴#9w3dip_@#g@ZBS}HkjD[R d %EHerɗBQPX%A9Jm`,u4r܎ԚlDtIoTTK2L͑EP&Eb%ZDiWG^5mHElvF^u:+vE>cK6 )9 җ)֙YULj-vp*GM)qf$UֺFۦ,7̏S[Q_JCnl]\ڮMx3*, 0`Ka4Hl!Q1II]<{ M3 Q%&'"*zk'BO*u.zߊJ&̩%Ͱx$Á28! Db$ۃB6jZ8b!z_4yA}p0grnj R5٠dc̠JUY!F#NHȿ2>`v5qnTUd=2?8;Zݷ5͈ Lc{T^*KPӖLd㧒9D%@ۈقj2AVa1 -O(v5!l/,Z肸V24ET4iPyME}b1;+]hZ=K`r!YF[[yQx+$@e6KEW&USUߨFbyT':HdЗJi`M0~Hv?nЏo,"d*Kq VLA#V  XF;KrIn 9h:>& +28Œbk ) x0 M=`ziUۚBh Bؚb|`t  R(VWC+Pܤe+oM>Cσ/DhD td+! Τ$12iU?(- ɀ!jPk b"w{0qRcCe t׈Bj(4cvf:PF.Ot',*k$k4Y,phm@ :x$t`!-MvH¦lpݢ> f\%\qih}6`~|9z~쿬oX>=?:=W3Ɏk͂ ]H77kF֞5`ǿ;BkgUCŪcԮcZs5YkGhγF9@a[Fo1P3fbj֓ʌ4IMG~ I'? 9k9j~Bg ڛ/`'MJTCۊJv0X4$SSAv =>]ߴUal+4OuSjCn ;:IP ˪ Gʅ?0ۢ#ɡf$U#1<RuJ` LB)1Q]Ih z hN\76xk+2(ڮHkҪUVm(5oWl:Lj='е-93FTm ?__ŗ7לAPK}4 bVsC o (B4ZSDàe +̀z12=pe6Z}~Z)A(8IO{Nq`Ic6\WAǥ11D$b9RQ Y 5L?ClN]hEjFRb g't"BQ>J+eI0H56[x#'z/n9=qUңY˜"#\ _ќS3td[bmY:}"˛V{ӻ52/e^;*"ʸÌ@l@QGG1J#'J'Pra'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qwB3rq hVxZ0{WUv<u# <= @o X?ٛ:*X._| _}A.*+f܇_Hn]}[Jo$!Oq;;Ըhc^,>7w&EG}ΐ,]sI5` XK(:ueP]c~uo@yvZ=S W3`|4~?nۯ{V;4-6lqe;"f9nٛo?\ʥiJ2mhlN]lEÓ1 ).HqA R\ ).HqA R\ ).HqA R\ ).HqA R\ ).HqA R\ ).HqA^懱srApJqA޸ 5w{ʠ CЬH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 z@6"P3 pmh{'P'kt9T4H@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 @o'jʣ^m77o6 3L M@q59{q h{P޸hIKcҭΡψpr+z6th);]1J^!]E4'uŏ͆nHs+n05;]`HW"tFtŀNEW>S,卓WNWȥ75_v_ m4ej7dh".:0#l~r,ڤӾAҕ .(]1ow_ NWҋzty76qxpXǻ6dbpݍ}&=&or*Zۿ~GffBCK:zS~I\눏GǫU/I+r)o9AOzj ߋ$(~ۃ}W{1]_Zce]!\Zk?w;f2g^NyIeqqчfemm2w>ƒJJZȔ۲HeNPf23sQ6}WFA8]7i.3 nQ&ztKvNpt+/5rmwbF5ӌ MO3pCW@kwbfF"tSqNX4bBW}+Fy#f^]E>yN`C l٨+FiQ&]G.{ owlo yiuZK/ Y_tv+'tԥ׊/]#?b^ͅmNW2WHWFEŒ _d]~}mϢ+'}+FE]JJ|}ؾK*ZrOvw?~VIM[u>ͅm}iFySfC*0ƌ1B˰|/ΎePɲ=K(;9=ԟzyy}/gdzMhPiheXc zL]yْK7- 2瞗U[hJF^8vEN-8]KY͒j-k&g-q,oȟ}:! 
֖%)DChΚ9=f!ͦGaIϥGZ= ңfEW 7 f.thcwbٻFn%W}L,^"g'ٗ9ޑ%${Youdjqc0ʤn?~EHWxNUCW]+B ʕݞ>] +" ]3 P Ve۶t.teAե slPwe *heà J>"]Y2+ ;{b \茺*hy J]}?t6=3rG/Wx8xPꖥ# {ziӃE WٮUAktE(ert.t;'|-]qI6uU:qhO8IWqeBՍ K.K%BIm -:LXGtw#WZYTPăJQjvQPZUTRIf4C 5+BO8mջЕR8UlegJ)Ø ]s8#bq;DW Yk:3LheUArEOW4=tEy \uU֫reCW{&]`!3^ ]PJ%?"]YDWtF]ZZ.YPrwCWL+ksjuu\'ތrZuǡ-;+Kv9i+tU*l;]ZtϺ `vF]Z WW%W=]}DZEof\ v(?Lj-G=ZI'Zz.\j-AhfA>4oΜqL ַl9bhz%`V:hE_O="gfCΫ-[7 W)֩vߗXO8+rP>g5?/snȁۘb)gQ**~?bmt~)ž'_yT\"un>bzn0Ojy+ןQ{1˛Gy]~gI ӫaMss1bl #6f/eD0#ʞ{Fe*IJS51rLA;.S紶1h&""eDk1,<`RRͨAD-y.<F&LZ$J $]4P7̈ψ_5'^TզƬ䥼 yŝ˵ "ɺ9FwXd-yE`$HIbT9>l:‡2>| [ok0z;؉e۫9b`D>'X#_X<|=30-0: >Dяkfя7½:t}ۄ= VNWxzWx5.Tѷ݌F1/ۤ%""JaB9SecrDcn;ɓ3"ԩq.3{&mA)mSʜfշ02O:(1|iQnŽ5 +2Z(bs3+Jne5e"9؄ofs-}d:ؓ%h{D.bw*5ECr k~fKi땃Iw7 ~_}ODMx:F-Ci/N&4v)3,6 [rՕ̰m [P>3G +Xo|i4Lb`tZUseSv޷:u:xruV%V:@.&1E ZxA2X=mEiPf7:fЖЖ#>7xavOQ <{d>iAWݺL  /**]QKjVzw:=.^\oJVTr~x^ﳚ |PJ0vԷE?zgn^T<_7^~Ms+t<6WKe2o?~WqO%f*Q1۔o^U/_>\m\4\1\*L:W9: FmPtr']IW-rU+trƑ:N1%4SGPR,P2zHV2C^36q`3*&Y@@Tt2}`th J(r;s@L݌䠿pv!Vg][+g*xH/ePk&d-SlI|UzDzٔ^;Zƣ y[WX-ڈ*g*),P9&0G,cR1f'/Wba%myY"w4W3\H9e i5QIYF,Fug`F'uB[/-orL HT % up7)oGBQdPZ(H=iFm]>l.yO옷-URQw`%8CE>D t1q*.9FZpJF+\}~=e\J@]8 դ.GcB%,&UH>-Mgh ΁ %Hz=J2W.)sI".bhW, fJfOO!I?btZP6Jn:/,(z0M:d|\Z˖*ɢU{;PP_ՆQYѨM}LBvZfd%HWy+$K*2!"(|j3)]Z) ;E_~K't6U&wLӁ'4n>܅+7Kxr}&&+Ǔ6^UgSy̡u6Ea|I}y?s5QCfRP!HPƱc18{Fl#)y> |S"gy*Rw(BDgr,(cն9 J 01+Ͳ쭍!V?RV?_ɉ *"/'`ɮg~{|K~USD~zb"FZ92Ϭ5?9cLY'tB)!*I˽cze\Lh䬬q 6j-R NA{P2 xv\ {)'RbC/1pekZx8֞ޒl"^O3{~k(7>n;C%7z%~yEdМԗk*v-}Oǜf:Fq&oܿlsz K Q6 Vy~+g\ \hb~[<FM @O(蕡͙qZB-8-%8d\HH Ke,( `1!s5府lֈٯ5N׎ KL)vHf|]|D%AfKgn#Wȇ8lj#WI]A+esTl։Mx16C5;D^hJ\fCb+mX88Flo>ѧ5EryQ [53($fOMwȂ6bo59EJN[b){PN)xhwuc]-0Z_tWJſdhGSڀZshe@JC:DX z[   1le]*] xb;Htj[٫;;볚~&> ) ;Sۛ<D0 '+2$lV`:#er:d1v^-h2;Y =|kcmPYSm!M܈%6D)_epN !O+ȃ sU;dU0qYSP&}I$ʗZ`aRTQ6sݪomoaZ-Zb=ZY7?PP-Eْn]n4%u1KEu`K M1:D {92@yJBe`Je0^Xqp 7zp!c'Fm>Ry^uy6fCx,_?8~߳ϋ4yجҗ/^vIzOQמP! ec nr+|A v)b[G3E_O{.tW]TBt/9 ?sh>^ I/Ǫ~*$Vת}(>9)gǿz 3<>|{G fjݳZl%q}`0}nhdch(WY Cd%S3# gAi>F'cښ/AXsz+Pz_ͧiH8,9q}`п _źbju?c\_iBR^kN3l>as^y> JE$E`Bb\$8 w7 ~mM??ſEN>ec㱜N3g LBcЮb5Ք϶nx=LO5 "Q3U ě-ަG՟lҔG}NnXh}ƼӮf}D/Ay_-xnֳ۔侄:iiRl HeDI9W̠Ӟ'Vw~V-CR^RKq-<SF|q,W!0AsV##23C4<(iHB3] T;QY9~kMM:}ՅSOc}Sārۤ-[U;|/qv") +WϢϖ y[N̗[Bi>2%>F!e--mtڢT8+w4EexOs2ZH`J#S. UY: ]'^iWaj8^kvs t6*JxēIy FpNcu}cӜr)9<1gw.z*4`JLks0-P0NMBR@)-VzkORh\qxEЯ75`Ӎ0 SDޘiA{n:M~p7;K[6AONr13wX |:rrxVz(ĹRrx,\UN|籫Eբǃ_]ij9F {/Zy1Ja89Xa`_"XGzƪ N[atS؍ao*FT*. JzKp/ =K)+/q@|LEeFԳ*JYHq[>q-YlyT&[P6<x'JhiN{dzAS$8R=Plj6G@I5)yqJ`PkX1FdBI'T< W>܊y"Z[E>4mF>,Xݼ]^c .1Xlj%Wu!_(\hHA"S AdqWB #uݻz[U#!#p@ c%HAM =(00z/S*k7<>Q7R>Kl X 6*b"AlTS ZƷ'F oMkFa%6|˩[!8?\-%ۭQ\+;葴#cb3di0.P~hA%Z:2;JWĐT;Dݪ]To־J3!ϙ Iʘ²8D,uJ P X˕뮂`+ϣ6Wś[<w].;Ti&I{AG3)&Uʨ mL/<~<ϐ`gʿ

x49?F_c$ݍ:X/?ݍ:]]i@#S  `*v>xRzjF U7 DOrýVh޵mO<_Do6UWР{W՞NQwk̏[|2ۃ/^nh: C/Sh>Y:>.OFg_o=ŽYj -ؕT֣'؄AH[ŏM$tLTRbK/-3N3^>"7+Uel-P|WmX.aX5`J2DEG,M(b5 FG#,Oϟ{X7G>h˗4 |b%T|p~ ڡ^`C^Q6v"woYzn ?Gk}9n,Y׽ hyN2桼³ZL_NeÒW"jV mm Yc*8U~9LFu!Bb:kAS٨D.KҳKƒrX괼)Iv_M}In5bV{B[/i $R'҈͑EjܤH] VnE j'ƴ{dn 2oEoϱYj'OXIPDhRrB/'R3^,[FU nSJi}mHmb[O(pKlqjBUr`j@"U}*lBjvB* ' W,hWH.3WHWHJd]WWSo0{~tj#r) WQfTZ]pe6+me!BBrDJf;zp(!Tbw UdW v 촫 WVK}j lgr; +Mv^be'߰?mv:Y F={A_d4FLNYRVRSG؞6LpM=ߛS&.H;H@- ݶ4RIuO9f|i/neNxd0@Q,M0y/ /f)E _SlemӏjNB,Ƽi8e6O CW~F~o\JSFw(BrinQT OGJjd;W@!BrwZRiTWO%Z\),5v\3.gRpTΗJ3C!)%;WH.ۙ }Vn>R;)•z Vbwv\vZCJk:zpe?i sd^F1YQ>NHNϚDR um&gd v{\p.}{O@qU+zޒ; ٥] =[-J $rMvIT-f -'IfKŒ-Eb4xߎĸLU/IͩnI߽cj{R݊y&ۚ3};ZV),'{:bReRS혡Q4ZGA0ܒv+eTr~dK\c}J)0Ndk[w}'$ࢱiNFs%c"^)ϮYs +ΉEŤU LĴ< UHC5R +"9XIPק 6]CaE>V%$M?Jl Q$uȼ<9q +5ISm!dkб?vDM 09O’$RXSG*T-(',hIwm ϳw3T/f΁63D ݷzfHGmӀ%jgҖYF˟J;|˜l%Cw,3kKN6Q a%b.$i!JҲ:)OۜeJ²p}iٛQruz5|B7E/EAHޏjD]q4UJBËo lUz~ǗM 0BE_g糫0Ȍ IsVB0={\Vr[V L"qx6- _B-nBp}ց4v4-Vr.KEvҨC$9&6^.8V0Ξ*5*Jg40.}ktVkJ&iwL7A{ϋ{ƳɜOiVO/\ 2`T#kat.]ˌFv߷FcPr׭ynx0 $GC(XKIWw$u׿츷)MR"l1^a3= =L(:~3kb퇈x%J__"*w cN*%^ XA;S;S"g3_9 +O F~ #S7sg6;RpyP٬Yxke~{{GʠR{W%H3W!B3VUj h[$>6/9GB@,r5BŔp,NߔAZ+`,@:1&c&/1!}/v443 U!Xi(u);ZB.PNRV˶ݓyGC('=ɷ6}ᶛhba66WO y=(Png픐qWs'r&XtJ:bpJ0@ e-| ,T-?ȹ^.bp.c1F1sɭ`)$qst'?]˵`5;=Bvf/HH%f؇dMVyk97aa{Q5*1h]F: lpm,OjRz\Tknb ƺ'- #BX6}^.Ҿ V~$սf %Iusצ\zxBGO$#~xtrӅyD̺=~$洀R4gKrԐ>A= zHT;! D36F̤Ek0!3M!A#g89)DD(h7:+$>&??{ $2s̍2.'G̚.qxca@#E/PM"^Ž rrM2RZCVcTvrhhġ$t ɣc#w7۬%WݎB͜K .}qO1U9ROAxRSŚߪYDEa} K,7#LAJD6qSe  썽 1L8zb9֘x{crt{cהBQP{OiH=.F@R {2U c$w7'Q[@ KK*Ljd<)V2""&ZRPAH8B7"ݰ-lehA{_(0n>|&S^i{lpIaΦ1]/ʢʭ A$$XbT%dT('dG8ɀ9IHD`'w@iV H(y䞦PXHj7A2jH"ɵג$VKNJ *fH]nqnOJ<\MR#xW <'+xb6 7t2 ^&FMBIO{rHmHK=$y>7ɣQbSgU)$$qk H`oaTSጟ$O#'u㺉/. #KO""3""L, `z?TK$\p̘7Gh`EhQó&CC:(jzgױ;oɆRB<1pU~"US"AL[.Apc# 9ApZ)iA##BDvۋ°7Nq06@EY p:6J1/1Ugo_W"zM^z|i;5%etèyEң*gSuV%F-eg_gFs={Y%gϟ:Лfx>ߟ!PpY="G#waԾWOfPTDS(rC!'ɭmd0h*K3j{ٞ(mqE5AP) ŇT??p(q/J!K!V{qxkPW f * D|z]ȏ*icS+T1=Føe'$biE0E YR zݞ6) 3= Yi~+4&kHS+ MM @SϱjURw\w&1\x7iPGCRJS| ju49l-Uڜn Ղ3V/фYx>z&ߓƒ VyHL/i/`dFBD\lsK N!6}!vB۩/OsV[Rĉ2E:zR<6if-q)lS*W\v1',hA^3'&h8.h_8wRF.I`w"C(NMݡ-QWyw$P113ߍ FY]0K?o9U?Q5Fcqc.z0ŠgaKqq05qM IQN)M"$jM4Jw+'{n;[r3)VSV݅rpDY&1Z)ϑ OiTǻh`%e)0'ު_i/91߄ "4wk%Qb AEm3SȘhі@ If%ɱ@CD3.TyĄ385<7,SdM>mt/k͓]CXǡkMM>܌/Q$M1VYJ̃"C9$[)_Y{$ONQctZSƀ@H`EIKdPt*(e 38뙱;]3.lfʅ|¢mv3f}Hƴi]M]](ORRXƀfX SK r.PL(O22ޭ. >{.2½g!mr1_:mG hJ6[2 t؝Y~|[&fo/t5;|`A]5Ϙ<2sN2@[Tk1ه`-a-y,\i&RiT !`EH E G(|2Fuҏku͇YχQICb #v6?veD1#F\5e0d'-%2ک,h%06-"'2f'F wʈPB`'Yiʣ,#I%E5O&I h*ƨüYψus}lʋc^/6Ѩ0 QXQDb$A2E@(sx>lvw@{ [)^kw8/8bFQ;=n7x?Q0bԡc0._WvTr\P]LD%sU@E aqp{|{Ɲ3R"PF . 9XN)L!} LA-s>ʿa~Pbcm@%DL)e@p+Dp}`\ .LRt :&rcKfB;02a8kpͫf G;F3v?RO7;W7aC+a.TPjJB:jɈLa -#fo!plE}9W\B\=$ȅp) ?Po_l`m>grý f4lw߯R9j+Z`1Pf+ N ^ F_fmvh3׊^+чj=B+_}%>Bfi]OR %Mnr06jIUfUXY-EaN&˄>̧䣙K:rztБ!ǐcmGlȞUay;,x$ PY+SxO@YbbT)H%)rV@7(K'.WQ֋\4:cV8EFDdV Z~,bH֑(@!W|%)U|]|޼A6io`!dšt|j^<|FYSjүu$֑_8oQo|A˭'Wχxrt#no@<獯ix~zu@Gُz驹Ս[/-;,VZ۫Ոт)騹=Zk?]k*~{<ȄW&ܒӝ{Jݫ`o8;evfO{Gk{5nz&h? 'i:A'tܿjiP`++ Ww/0!bY5'gϛz1jidߧٛ=R˞֝)eМ*٪* }r$q]MJ.T?!(CEMbO\,1&DjvZ/\p}ȴp:hëhtKhRMGd<@"HB Vnq!nWC`{ss'XDrYۗxW-no+V᭻7m!A#+B<wk/11DD,'Eu*+%|(pBè \PϹL !YMaFmiĮ3tvgC'U>I9qIN1DAkx/)3J\f8;4!1+b$NlB jY#)l[3.T ]6xF@B6>P]T c2\">EbٹlY.KLD\+@B\~j'4;-00K2 d4 (-oGV$w)fdwH44c K}i[\b3hp5:#R}غ :4cσ]9cI]q0*ʡmaóa&k+ TJH"Zc)kE_JՃ3az1j 1XG42?;J. 
c$x |wx!s=gS(T~曯_nTp ùpQ-a13Ȓa;x#QK$Fl3l[n!\J}͸dN6Rri +N/wBBBWVtut%UP[;^A+U)thMwvut$J)N-=?]!J=)ҕVFe `iY1tp/f jwBtute $7(,,B[툲5g:Z +ˡ+^th)dO$l%9'~tvp)9Vh}캄D]I9]5tS)fDCWR wB "]]1'mVBZBWV]$]YKkF_0}!8$onsNl{qb0UZ0Z\RAH[ߪW;FCI';],IhȕV^z[e&y͍%d`Aܔd #mRA8A%CXj-EBJwBjNU誎)GBBWЋt( JY+*hWWsh ;]JE@W'HW܏µܟek?pӡ+K`%X[ ]!\^ ]!Z|D:t=='oo'&ǥ# n]-tSʕ֜CWWR jwB`NL ;2lR 6]"]Y+ZW`Bsj$HԻ{~sF7ɾ4*1RhrwF 4}4ŘZ!m=,ZMg8i&6VyñYmUzV!4ߥiY&BaVnķXeDlL.FABb ZMn J;ԝ"yLR wBZtut^*M~ w$`$} [g?8ROߚK?-FOp6寷5̣ p+}|(գ?~8[֯k|&F꣑࢝?Gm_oF`3>PhlQG^=0|9Hq.~ S3\@TϢ?yTϖ h&?~ϳQv m5F\&h+M]p#\y 0m?{x(8g&w4Z~~>zPGKοO77s8mi{(ld+QKjG9?gLM9NUZN{oZ"J6:x)N 8bM1tpۺp wBtuteDWXiQ ]\M+D0D@W'HWVFJ2-.Ŝ3"ZNWR>b=;'Pv|c3n9 p+)f;gtŶ+6ծCO%SLDW؅շ)+C@W'HWLjKҮUhWRIC;]!J:Aℴ3o)~y2`\1 ◷7FSm9Ś@:İXj[;epO2.hFTl2NPJrS6ϐ2+VCWBR`NW = ҕT*Q]`v U+Di@W'HWJsKҮ0圐!\K+@˨;]!J1"]im6µhWhsۡNϻ/]Þ_Wtpe1Nʐ k)+Ѯ!t(+sbld׿B ]֗E,d$dA}ʢ"Qq9EҔvE=R duuS/d8 o'4s'Y} te;u` h:;]{2oBW:v:}-]&^ ] OihSj(tj{z53Lx׎LvC}~:k³_8RKٝ<4EF0zۜ4Ήk[R4”A~;=<0f ll 7Pb^`mf.bb pmK+Oe; e HWNaAtTOj$&ڧ$K+[ҍ:a^]h@W.ZBW/~uUҍ/_N6۫ qj_\vE|Ny؝pY]mys8pw?bts|"x>5"kg{eut !޽tq)iEkD=~((v;]AfFD跻1QR:{xsTη'ήp`4_􉃳W'|4_zo>?O5 #-z#/6 ^nn]Ai޽ޔ;~ր=^Ǜ7I[wcKC%m\05Y:ˑQQ6݅DP2B{|7I=6{_o{uovtoys>p99"uW#гn*d>eKIYdVL7\:GSf ͨ't!bJU!iO9W]Jܽ.Ǟ cGajFZRDžyƕZٷ*BEogN]e,Z=c 9RkPTȁbkdr-H̍4Qdzs&EMESyu{ I5K[[jn\u ɚO͹pޜ@cVNTRZϩTRU&՝H9iE%xVS sad;z7E"b'QjShі8:FOk1M€ڈj`DU5/ g\mFO!jT'b>#3k^,XU!kՕkʧMpS7B 37%>{(R.I 0kϒB4BF( ٥f ӘJ2](_ ,W,ܢ % v**6(:j< uԡiӊ#XQ?k(QN[PyU벫+qA[ <*!kBhcnue+y ,&NCq}1eC;u{EKxٴPU7.`HJƬC6S1*T.JvdZ[ELN=R{A19vx X-VLCjD*1+5Kl`WP&4$: @8Unu45~**өPM58NH&``U %%2r3JA e :om09 ȄكHcgrcEBy>uFݚ @E&rZed^]0P>x!kn 32]P&>yt_ 5P2&@ݚ2:@vdm^XTu~V% 9ՠjK-"V1j=xaPMRuY+ QHK>|t_o..Ѽ ȓd}ȅBOeL3f߸ ]{ #FK|uI>h@C^Z@mD|uj З9t 'ptQ@R@"TePКf6CܖSBEٔЎa<P9@'^BkڝChgՌ6!XX-;{f/ (2Б5;؍;IЙ$ fjbRE ٕ P?AjDPqY]Qõ3`Qy0 !SWI|j8ˬB A1+'JZbMπqt֞Ewih52+io kNvzrDM iorAVe"H6 W͌6#Kp05n ڪ mry8|_oܜ`tݦ9\#Q$;{Հ ԭGwH7ۭ6@'eJac`sONd^G ݚZS{I(!e56#;NoUfĞt5n(!/[tؚb樇 tyB1C[us"ڛY'%\IbLvЌ2 R.1#K[P  Xoެ7a; Vk;"h$G\k'F4$\sc~?E bX0j);Zƈi87F1 w01t\:'X:W!hSQYǨZ054ihc137+VjE5kփ*TJm3ikX*lFb\Ρ~]:jUNàe6M;̀| =pe㾐?Д n FU2'ҬFp*q˱ %׆.I֠[ 1xx@A4Tm\ *w]. 
q@C, cV4k&" %;)jI-, mO]A#S|fQz웻yֆR6kf͖bSD-s'Xnޣ8}>ϵ_'h=& Fz߭=tYQ_[^{뮯[9?^^ >?`{}v_x{Ӯ} U*O7.0w'Wp|~wG[j(x˽rۯc\m N w:/b,Ko6?,/0z_~~|͇8[foz^ݜ~MڿRKg0asMۋ_]u٪xv !mz.][/RO4I= k"?¼ (<q xМ 4\yMp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J Wbշ4\yp1 2\0b WYW p^}!"p%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J Wb75\Ybdd-f3\Yrj1;\ (&dRG WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\%ƭp58,p (K4\Aϩ +1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp5c_XZ}X;Zs1o7ZVeWxm $̢6` 7"~0$KjRjz1tҤ@ϞJf8?9]jtu"'vߟi(}^t' ]=vIyfAt~At52/oWP^ ]iFk @랽(HWFi2ս}X9X޸xX=[ޝ]} ow:ZJL+YlV&3i:̄Wa, !}fl 5_ Kٳj%,YzTkVAH[3urĴTw8fהȩ70xbrBz͕{!_)ooI`drȥĖ^z[fIy=(.}m'ꊢ/f=7Ld.& {oA(:o\if'O\'C\Ɵ Bh%wrP.E%7D!AvJ~ zO-e3BrY;3R8bP^Y%B{qʍs,18YX%͑y)w.w߻L*ӡG0=\ ifՄDDa^ ׯ~nss5D;':?>ʹ]ha|So3Me0z" (Yh{с1yfcSJJIWs/|LK([YB@SVT_GY  ;F-2 ,_]F_5kM&IXc1_%Fsp^\DGZEp:RzYץ3.%FQ'@go-oϯ{S^gnlp @ffkF|[;`7Ux;m|1W?&,qh : }!YZ nesV:u@'¤Dfc5{_^RNG3mKx"kU ɀjRP$4qɰY DE%j䁻Exܠ>~ aO%BQPԔX`koL.Eg%a:J(,"ah a?PY HtQXn}o7w}3DlaR \x܋iA3Xa,֞ t6Z#r6xi $t@x!<4HFs<_6Ay&knDfD)=S^#׆S.uRiBdb`YG8:L9 8{ɬ©L(>$ViZ,l(&EIk`pp[;ápXWE gG CۀPԶfw%y "it xQN,Mv޽2~w2^Xyow>3wx@}q֥գDι|?dd=o)ޜFouřt>bxs~vϦL˟+3IM^}lRw;E1Uzy.A?̟}ݠy'p—W ;Ľ!|ä{Q MDmA0aʳ_hBV 3,Pq~$ л*IqwU:?.SՍ)L1Ri6GԦ0x>gʋTd194 }?F UQTFW$ sCOuFot_ Oz[{F'ܨ%ʀc; r`;\/noH2-g4 V*MYHNxUZeMz:..l9qx7bEI'd^ RHJ%+rTGΘm0ŏq Gz9޴ur.u*q\4|TAYwdX˾:oTkj&VaHO қsu H]/6׃Դ)g /L& 3AhuDz{l0jFEskM i{27,Pj;z/etmo#Oj41 T ͕f;N%hqjkiVnJss3.PG93-5^Q< Ք)3RjӜ+f F+6VLߓ=Q_OK]-\#Dr"b122KX.zgq᠚{IYI.uӭƊq8t-|ow1N,awnQ.{Z#(Jܼxzm~fN|J9ObZ;*T(\.}Hk3H0gӸgYc~qdf$R$Jǚ)vs*KT s{Djbbgz7ڐc'=82xb9)P@* Jc*G,kpۮM\PP5u)Q Npol99I$.*9ZEztk2rxAMƌ+;K!XJ 6_/n6,Պ ]ϭeG7V25O"sNgJ /dTAd|"f.d9QK XӗbM9H4d4@=/aT,o>9 u6_^ɥZ(QQFa;8M&?MΏRߖͻoœ@90zb*]"ca0VZ.Pj7~w *C*ѸSjxlJ#vGbUj_Zn&$'5=41Ja9*YL|4<B%Swзnn ( DDħ*D #׮,gg1ޤ@lYUh/Qx|o_)N9KEs)VXk"B*IpP n!eˣ2قA;(_dۨA"S AdqWB #}I Z>H>&~ۅ*~T֡8"DB$!02QK-L2:4t怇 (f\r#bR =;GaV-\{rk@XR+e}-M(b3 RG#,&xί:/bwu[Mb E}}{1N.ߟA1v(O@xqTFO)}ݫw@ޛg.f#u[D~7]7M%>nj3OЈ70}, J/]֨Ekmٸu IԣH`ʦt>-/-Hx)K% Sx9si pW1ŅX='rV1839RZ.̻Cnvr[M/Wqby^qLԹ9Ye/m ;MI9,4Q!Ĝ2`n?:ASfmھM`lHvrF=70,Gܺ~E8\7H@"l$&tJT2K@,|X:8%J¿}Z6k?Db%j'XWRH"x% İZĀ"5{nR.+M18 Ӯ&QaU gY̩ٻ6r$W|MY|) ,p9 o/H$+J,ǖHՊ9EnUwCVՓd Htm32$M'q1lfΖ`@q`ˈһ#ɫwg&r2bzg&uO9i`}҆ O<*aLH, f;x 2QBt!fzsԵ(0]19͙_JS}*!_ކ|3o@UZW:\o59[ R5yRޚ&ZI, ћ , mpgRe>*A)MO RIQjmw)zw -E1>YEs5;Hm}8W&;'N D Q0i<ç?JYgs$:Eu쐜CUt4,0Im,v4;R{'SݸݸQd(yG%MH9 (=/=ha Q d KM3匦H'\psNA )Rl =  &H3sTʽGw϶>9k?`JbTFau>("&Dp&ehtJ em P|̦87P21Nb2L4`0)X+\lt9;j~3w1,͓!0eԌʫ}5l6NvUVד/k:Bshmcz .^rq+T7C{͞ǒ4*?Q0l2ٻKd>u`  IZ<!+%11*FFtTBM:8 :GaL+ lNh*ZGf<af5 *.-v"5);󃳌% _q??aOgQ~y1>kHIi)BZ(3e8J6 !{og!{<[PW7s9\Γ5ieCa[aJEYZNmrq AidT2e!uQe&^9 +rbr1ɥHry#}af2m00^rx8@$Ԩz\! Ef9h%CtN^j2|U/l`ze nGg^ LRTfKHe~̱Rݭ>O?4KI#b&kO#(j:.uBatrDrPK5i,L5XEW:,l(Hڡrm+zb`ydoOzބ5+\L\z1 ovFn4mzjpEmt;7co<=ٳg7_.ټ~6?́ϼ+ߞRʙrnnVzmVwsqG.XJmWvCoN|w:ss#\{Af̭sM<7}g\eCٻ4|ӟ-tN0`a mPY)9{"#ڒڒ#ڒb2^FYalVq&Gk5c)IWJAm_HWΐ !$0Gpc6$!pHг)9*KZE<.XEz]zx3XM+N(xHa kC-`#%b O6UV=0%율p GCpc̜- .d!.◕;$T-9|_<1e ֒ [5t3>:ȱ4, SW] d-+j+Q'@9pR[*8vϡA)-BJ".KofΖ!Ș<ŽpO5@kMkB.R3++D*Xeʜ Fd%|h1a+#?X*C|/'7#=X"B2 Nڪ $BNIg Q2'*v_ <@rg a"hL> e% ^iMtBǠEI(59F2؈|lcmב!|-Iuuh.nX ˁZڈpFpmjuoҌL|:;.fbX&;h䘽qxQ AqDORe,Z ٵXZZ'jw-*IMHR(EYq[pE;9 }%J2v:!-X%Cy*vu 'XJ`ltUZ$RM zK"br*l'Er)\4ѦP\뚯f|ԑ:KΡf$-@RZIA$yt%p6ugy g2/B%8T ޕx0Za $0ޣꦈ.o+Զÿ&vp XZ7 ;_&4jP'riy#h}og%˝ӯ>_}:C2[g Z7,qlT>'{#¦'N+C! 
$"2Hdء(y( =YSӢtgӫ #f'tEL}~;?gsV]4R9x6 $iHHP beV$*]L .j*s%eo"#_BP\db!7 y܃YA;Ŧ[9Y;Z T65PyS;YY#PcTGVUx,jZgU+j/0CMje;p#J;pUbክBpÕJ;@{eNrPf\듟W IQIƃ'Twz~j9/~ p'wo%m|vb6Ot%5Nj#9="$'U_ݗ]If~_H :_55K>燓黳+bYLfߩ?姭d{z@=n^6f{""[}XxSegĂX!}>ɈTGPĠ &T)jva[uDLi ռ1i 5ͪwn6«cZֆgNy4᪚kf)}mv^`2Fy# W`k&\Usl4cWJ{z U^QbO^A5 Z,]pe<6Xj:pcWJ׮^fveGM}rV(=ɴK>83U-{BMMn_qp;sD=K5o>IdR$3r vd0rzs}Nu{s ¾7Ċ̛̣,ζT 7pM%lg۳:Y)F6#URҀ uѵ(%U &Dc Q:nQmε\Ŀ_^^-;_RN,TR\qCeov6.,vѮ.kϻhW]_]]vuA.EhWEh hJJW*8]t`W*8Y NW*8]tUpܳ,)h NW*8=tUp NW*8]tf34ʙ)?!4h* Nt&5%i@;|WnK?`em'Ź)e $ Z`b$BxR6*eǘL)C@m+c/~I>վE hlsJ@Q6^yzqL -;?A|`8P%)E^[:tO~ܸn*i7yM$PJFb 5d6Trkx6PZTy8}{Ow3>}xدȲGHZ TcGޮձckűc+=`oo9~UrL揚v1}pf~MJ A"_v Gq>gZ6_ASeH~ʵ7&x]kg!rSE2|ؖ]gRDA2mƠ̙jh7-w& /c"HpK!,Wб RFJCQ2pjLʱZui)Pʍ"VFudXDcZ)"V"D4Ҷ&.-XrB%ia%0*Ӑ؎zЭ>j uGUGӉC%˜G:вPq&pY|ųgGHqJ#9)0XzJnp ">p*?o@CI2WvXf'?yŇ0L{4[D|^s6[ݚ7-GaΛGaΏ(gEZn[d^F+{]9+@W2micdepSRJ`$ 7HB<84NE ޱ`ț}7> A wx劧܃h}ϼ3P"7‡ +]⢿uiqVW9PQ}$Ч❙.$wqr> Eh7`x;w0W 1]J35"oX:,vAiP cxZ湸CI>֚nl|l}Ƕ2. P<#)Q& /pha`R\ wojUʔzd2Lz9SE.ŷUnS`4Bz/61{1!_7 XBVӾKkR>i՟SR42^f * Fy#SQ:#topS/7=]$mYuiTt^Lq<,snVжnx7ܜ#! zab8e  &2HklH'3Ƅ=6e=?psΩ1*+ 0\ /x'Xk^b!$uBhneVMòlL78`%ЉKqK@S8ֹ7<^?Pre],Cb,(U2xi23Ńt96'vQ_67ډZ󓵟;=B3 !3CMG(Z΍@X]z|0Z4!#ƒ^ wE%K@)*2NjM{;fL뻛F%e*W'6Kc?'E|K[M88v3#pW(?:H%7ih 5(O+ CD;@Un^!VlwDO>$cƈ(r&DxIp:$hL@02AwKJVvnH>x'~&>P$Mj7*LQfW.x>{R!IE=yG~1WMy Nk&.hs> +#_MZ-+"m.RJW6FYY(HgA_J,kk,೏DÍM[}}Pns4!z -@jg>@'Hb:D{XG^JY(Ș (; bO+NՉؙl\{}ɺWpnذ6d<Sޟgklc‘%4O7I4CQ bv,,K^a:TrZ7\=F3m3z!8uHJ`L{0댹tQ, dҒ KD0kGHP>>pwA3FЇZlOAՋsV[Rĉ2tc*xlQͼ3ZJ(RՋطzњ8$tOqx2zy@mxXj5_Jy6|DAtas'e$JRʹh'RL3 YE AHRVa S띱`R iD@BZ"ZUlM{. ji1&څ^_˙%xMoiy "J!:I z6w /u$+nwS6CRl QCvY$ProDʵ)-qt'IJ:i&w]k-Mkzm*r*p6q"U9CW_;@'g-n*Bki nXdyKYm*߽0uA"0F ;:Ki` IZJ@RbN%U(GkQ G %".93Tۖ5qalak--lRlyB kɼ~wHC67~U4ݠ?}<%*@&X][o#r+>YuWWK AFMY-)ڿT"+$ %[=53]_UW}V(}u2/dW =0TY'PbYTA~ ٣B TFlaN8FJ Z]Mߗ8-vӚ9f}XmլvC{ɘCT='fHFFkRZ6 WB5yO{gµ2#CEG2E1WtA!Dd:[aos~V P,bo{,b)(]»=Si9Dԑ>EI )|V}RhFHWk3c}QI_*YRLH!:&iUvyd%Z߳EM̯:]uKRUXhzffLW7HH7"-QDJf.:b3[s-7ȣ:|%'L;2 i#Z+e?^t$0ԯ0q&Z.싲=Lڨ!WJ^m%zDl]cYd2)E`B(SHQSIH-K \X&%/蚹MU}ýs/&sj tW=utbhutu#^r6֞J\7~z}] I[2e<R@G +`**!'*|$X-ɉ/灯M8Cxh[".]l Z4^_O'e +C%RWkjtslZ4?蜝.G7h<AoY ~ub;^RI|n u[` i4sgL՝(;-(j5F::`@\0HHX]\:7( WʘbbJR$tF~ ]WrxsGW9+\n^O_ٝ{EM|S뮩SY?{R1.<Lz;fX2 yd#WIOޛnɗ),G̼6rzN&{[}=揪nP&^ğ]oGm__ ?vmyɃr?=替ow|W_ln"u_g8*t9&a6#+)9s9θa@ZhePIz#be@Q36 M ~M|!gN 2ː>J)щ|H5B,JNF+$]ȢO+v?jWN^KOckC}y6i4Jxբ N kD^ Du=oX`+ɘ*S1WUZ-noEsD%C}kp=$e,r7wjF?9\! ƃ#UF6NnC?My;ɚhAi֩~}k}{ߟo.7*}4q B˄=/?}4)gJ^֏ DԾ;Ot{7fjY_5,|?ݗX hծXjw>=:xK-e(|t]]Iu o%PF؟`qTPF %`x(- O)➎STjJi\3Wo\i8%sU6dUS1WUZn4/Y᛹z檶pBOXz^[}J1b){2OƋZ3tXP-q-'\w` Wj4\p5FhZ!| Wj4\p5Fh Wj4\p5*9鍆p5Fh Wj4\ū&K[NxnrTklNuɑPt` r09ACyAv'1v;8 TAڳrJZPF΂HRe(ORDa`dze}@/ٷ%2 ¡l4e=9; 2u?$?h3qJrNEbʎb/'n[zz$ 2)|0BPTS Zl@2e$tdAHWԫգrI巆*8rA)M[FSzY {֕9 >7)eM#@^Tr. @]VTT Zd%|Qzh M  ݅Pyn&ՊuZ QBQJ838ޣs?!'BT_VΙ>t [g;)ClT>'S+7i-qX6 dH )Inv$8ﳓAQ+1\D 梤 )SQ)/ 1qI(Qgכ8Gzh@ML'_R <zze)۞lÎ=:|sV(7b<pE #L(`P(P,:Դ]Ho>zKp^ʚžBr/!P\W2Q7;q/qkVTZ+rrWc$HݧXP+g:RI{B|U`/OOENOJfJ['5||{O/? 
=r_0L 3o6[Ia^۳@)=E,F)r8&J Tҿw%')!YO1&)M;+ 3^7qsf?^spy'^m w}iԿ/pt7/w$#jin0)j tSV#PvDa]2RWYÃ3 7| l3=k*ϲCBDާctJ-ë'3\fzvfy7}4J룵1\a(/lilLLT)PxҨTNf8^rËZLtH1k`/_{Cfduj}> Q޴~\nQnB:{,8T0tH=pX@X /aī/ݟf6HWH`{^".H@e 8cz{O{H=A'ZWEAގX7k C_F?g58&'3I'Uk x#Cf.q'Sǯ?dVo[<ǛZ~_Ҕnixש:6=fj/vHQs/y8#x/.+eM[}awmHW1)|}q}\/6|7yDS}ɫ wm#Ik~ nnn~TFc8WMRd[˔ML[bwuQF\mE:/Fvr1a߻qc /n8yo/4'\xOY\jFHg.Ϟٯ*(ΧoY5\\ՈQ 0/~jG_n66[ۗ_r".~r߇UAk'c;oNԠkQ1Њ6_d\\r.)QBm{>1mNnp (SIydf\:G'wq_qVJKW8\R) ڭb?Yun*ܠba6 j4_30B=Ъk(j΅֛{fjCm3X4063Ia|^,{j[mfyf am{Eհ.=r7k4S],n.?[:"I* RlDr< Zm 0NtRQIIk!GBMTE9*qUۢQ*q6ܚM{`up{tn7]~ -O%W| 5o 7'+L?_4i#ҲçQJM7G!^Lqtb0NjIm}KP2wEif~GI-G10>,8´\v %.Zk[ˍ.\0w:aɉ:搜WZPUnpQ~N1CӃ:^ɥzh08HUGqD *{FrvN9:Պ#GUS>ȖoJEsV U^&%op ǖjGOk|2~Gj3g~pFQE=9YQ9}ss<)! C6f߹'qfisl 5^}K> dXdd߯bg=2?r2:ܜ TU"s.S{ʸK%'(Z+s=by.Cm~K6WB`D$Qdlxx'S%4J^)$xt 5):= D]j-C-ƈD =0VQ N ͽ|'v|'t0S=%r%ٗa%x_k2{+R:'E)uJ)OΘR"Kƕ^ ))RUJ_J $FBK,d ?omTDɨhQw:#gvw7Rp9iElTv 9afIxq}nx9=_9-7\9KtDG[O6Rc̏;T36vYoZj.7{8R&|_E6v/|\<`NOdO?\.i*{v0+ $*"z⺨6:p-uVwKq`I޼m~Q&/F=z"^B?ڦxluFY |XΎ0e-lISU}ڰQ4W~]gMjqQ_{W'O?WPs!qGrcq3Mr9eSOPzɁ:࢖N /#γ$U@l b(^5>E[fkpbp>%*'IZj(c* pI C%+CNʐޕB} vt+}6^ɿҶ"Cޠ45؟j2b4_ @lB -O'u @E*E 21|k0z={u|֚ҋیңGd]HvgO(+2fPAvu1Rs"iDR$*ɳw돼w3M.v7ymA)e]QQ,ߝ;{Yfn~O+3eqO\m˝VWIP.:/ϣlYAf^.]&a  lu-BPOPso J-lD.D(م<*u |)gKƒr (o?wx.aS҇hDKaAZj@-Yz Db"p+bpVk81[sP&-#k s=!DrDy%$;DJ{:*vFΖGkEv|c[=)kr TИ?!: ;܇(a!T61E@ dsT H6LgI@x\$q꜠&&t9*^dpNԍ.zYBdR>R>lǣ;"cZS3tqև& ON@9>^*uOuj+Y_?NѤ|M,Ps"VkJ㕢9sqt*)=~QH&zDpi*(%R]K,*8cWYYxPpNQ~UF+攞Mڄ!~Ӡtzt^Mqʅv "Z-uփ2*%Nr.PL(rÓaʞpgYlrsC%f6ꄺ16H t9%りٛb]qG_6`&c)S 51ŜLDkj-4|!l!!q'S4wp-I]FR2"C*PErt|L2J5h:YJ'- ƾHcW(:bD\INZJfJSIP*uNkJ%!N%\ڤ4Q #@ $%Ws;Y"^NGXꓭ3.U. *TΈ$,jEp?hj )8H8Li}qG_MA3$>WS#cSl(j-~B/LϓqϬԁۣSภR.K(z:wAϖa T fo03g8qy eq4šҷ2[^Z_zb (ʿa~VQUɅB])DL)e@p+Dp^o[\NMӈn%Q'pzdBRpM4lT*y(bb:S bi3+Z.zڋʍ|7>g#PQ8$𺘌aq_RJ%"rzy1Jo^ۜVn}JحGOi&|Wqrۼ-OW{N:cҲ7co?21`$VKQc`p)l_~K±|;pV3ƏI)sh9olfI ԧ.¢z2'KC)2/Υuf,cXFr'h)c)`ZJO D L$cYR:Y0(p4HRa~Pe[MO9vL 2[sCSyn}Ǖs(*ŀj*}ERҚl$K(;&**s "2/u``x-!B>!E ʦ~!j_i "$a["j$YQ yJɇKu49ݭeXM[jExlphYp"pnl^Ah-"1?{Jnd>6o ` E{eIcb%Ke2e-KTw5XuݧO+Ӡԉ`@ah̀_$Moo x4o9l'xB~dF-}PfH1dZfw2@k뜜']*clOp.قCvƦ'J 9-( [$z!]v>1Egrٶlg3~ o :bМ [|z6sSu'j|LomK;|.7yAוoRHg+]}J?D# 5溸e2 5-+ٶ.җ vYU='r{-bt y}p\849.fG _>%+%-}y7InW;ϷY% y)pwӚEfIg. h0Fiϩ֪B?u6_Pcs >=Ι{:Tff}nY1 z Y|?u<+I_G!|e2=|RqquWoL*<斌db.,Bfqs;WF>: Mws}tO5xطX^ FVK1i;b.s5ōhw'llݩRYq5}f(M7s3_m>T3(J?4ؠ!`+vKUʨz#y%.5ڊ>R%h>=2vAUxnbO2rtU;c)x(4[!=3+E:`-ط]Tn{ VR z?}zqlŊ(geg3/Ҭh'CøuDklh YI=+p9LcS =$:T>lO`u~:%_~/`j=`2YlWy#䈳iGs ڃgcdFZSC3IA:!W%HdXɫ8OL[Zӣ˗ve/0/\ņy-AǻXm?zkn[XSzc{mY;Mgy'%ΊTCQ GzX2 C.X:XiB)IAkf|ǚNFضe:RF#7Fh }*:k PԻ]ãU(R qM{PŸØ$͜.$G`J Th@!fԫoM5=e!A!* Pb k2 BE0H8(0.~!]Ht*&ҒnG' g'D.H`&k$<J=95Jdamo xJ8oƹMK LqŨy&{D ww؛x^mtRF(Ep~n 4YQ|įmI䴫=7iin0'| ]a`ya0Wqjʝ$o}ayD-niᆟG/p6nb$=)wѶ+N|ğh[ryXrN?[ӭ!2uBDw4t凃Y qvb#RА'w6=O|Q#_{vφӢu[gNt c`,%}L|颻ʖ~B K*@-A撠Gѧa)kDߔeϟ tEі]Ni?-;׿,ig>⠋23Vm,uS~p#Gd2+HfG`#Ĉ6x4j&UV 4{k{@ `Ӵ1G΀1F hǵxhI#y''/̘d(rIV>мgtb/r.)V.E.C%x.p+}jq n(}FYQr27[HT&hUh>esbZ+_g[Ec+Qzs㑣]Ѐ%[PSJy@{^]zzI^x|^o9ӶB^墳&=`x0vŘb`2G\s> hHeVyhdndd *&sC<=zI<{{~#2KUi#Y/,n^%3f (z%J_Le:EpI&5MQ,Vf {.y0=/_&4n׿^J뻲a^זN4|ƕT-?d0Xv!vG|dFGϣ-ӿqϫ(^PqezFE4߾'B/ѹ1u,,K^a:Uem5zo5 ݔm>|-T@x}6hWT^Xx[B¨t21!ARC~}Fcf2#l$Z$8Y414RChG0psaP @<6׌%hD "C"Jװ "'UmוTO25ĭ{AU⷏~;]ETF2ǘɴDab(ȢNeP d^ 4"l dUB#DB k%R0X s-qv/Tiw7M3y)Z jzW uT/ f7_NS[z$iݸ}##AXϱyG*xP-τ^xUnG!a`ӎVwO,( Ȁ 3iրBpo (cU˨)X%EhKB.1L`ҠPg2R-c5qv[zX-Bwe kd3en7:]'>j:-6yJqiB$3DpTI󺔓q\AJ\؊,ig".x1Be21KH`Y1hlZat"^9`Φ3u!z$R42 CK"jEn(g1m$ƨؙB5j0m,OR$b@RE&@]"@vqtbմd_+E.>!iw[#hwLKiG^XG#-fg)g\ȑ;UyJJG2§|<(JEfՒ",@y*3dcKp6:J{_a]awi|4X2x[nQlh m^h6:nK~9Ϻg *P2.!v^A.<&dW3sNroٹW&SܙK:3kAi܎:Gy9qJИI;gH6) d!{hrO)<8$Go=$ׇ=ؓyȠm.B. 
2{\LCgC ˋq[;4;v 6Hq3Y97 iirS[Ϸ]õ.BZ1\aƻzPWw|ruws÷ꑛy8z?mnnWv|oÝ^ͷ ޕw릃Aۛ?rm8/6]]{h6҇oLש\/>~|\ϭ?o1WxyrS5O=?s-UmnHYu88Ec7A< u/^kKWkKTkKQkqt`{k ShY$GdfRC & 1j|l TMTȍQ}"FG,eΌsyKTn\Sן.n |W3r{m3ә}3S0StՅUmEYQt"@-E >t+LU~NW|{ϻ1\cC&ꎜTzsRُt>)@>)j /p|yZ"8xm Mohm8&{=f<;G)CVƍViV79H9xݻD1xT < !֮2l{MMYPUl/OOҗ=8p{2- 7Y}?e&yiwOZ z$ScYRy6 z=h1Ėޫ8 p6XWghoH"Nl=1乳h&}? %͔]ɮ'D"YFlčZFȣ"! -*2Hgc?|ovPj/oGlPCTcRʳhyG~q$=8ZIՓ}tj30/g=Jcqt#2k{2MIe.bl!1jGdb+h]I26oez2}֖W< j ^/.La@RjHn41 EYB HXnJ bA10Ƥ.QY ({; A؇$\ >-1_hcGfm9ۤn2EC"˓!a!{ 81ǬOAJì!D>0 Br=*( TUj46C!s?|&giB'0oZFq:XL Ӡ2XӨ"00 md)9ư[`o1%g3Kyy7l&~HY+2"gS\(y17oO2twpץm<)EnEm{+t?I q3-0P`z)jbsv{wo?.?Q>$:cz7]!j[ 6a3n yB{h=h brЛQy XȒ|Sr?^\c7fr?_f/kgWK9Mp:臈`A!mWgk2iވ}"e>@Jɮ. 0MCa";Z M0w<>ﴳ~tTf/r{<@n&~E,,`ṔUkM-VEKXہX lqp%C=E.Zpj*|4jb:V+L{ JǕȥP XT:DU W? ]ހ ⸚$4jagpZjJte*LUhzj+htEb\\KJ.jh płB5ZՂ+Q[W^bo9s㩱G߮orY?{|׽*M O+x?s/G7f6OsﱝҝG+|ɣ$gW_vk +>_n ~trn;2?7<@n7Ql6p[ku7c GZl (nһ5]} c7xL:=1Ω=cC ˋqB.hs2vgA;;vv1l>$4;85}:*}7Awh w-v)@}N'+؋N~N湱Y;4Bg;:y41' 5DpE ՄX-}3Qf+0XLEba5Ԃ+V9+Qit qe* :kGE%qHd@5Y^+Q65ʓPӮM걮DZp%j K4{)փ@,T@Pl2gj5\rt\(X,xzRҒTZ/-P\^pgVUJJP X?+Qj 1JW+VimZ! Hr h31&+У 31ڭLо"L`] E JǴt0FLQOᆽa>CcՓ4|Q9B]VUVewʏJb8k|ZE3r>\`W DJZ,W7\WNTX0)W"wzR.ROjBkĕ9W,ؿBp%r]5+d6+Q5*W&7DǺf&_) K W+SnLfJUJǕt:0~S[4fa\MSk]MS ] R WOz%V+ZW+ij-W҅J *•J_ XmPP:DeiW(Hc7z{Lh&WG-S[RrMvh i2uJ*дi`?ghR* LcZk|`gDյfֻ}3QI7[oH*XYW"U:VA7\W*"\Pu%r\ڠDiZ#LEU+B-btmZ#%P5mTϮMWM4\WiUEؕ]$ JV+raM!T+Kd^j W? ]߀#eO=I6K8&-aZZȺҨ$ WOzѡ"\J䒭W*W` q5T&8jprT-KU뜦4gUpe ^(#|N:R*9g˄!VddOD"WUZSO$*6#cFW,Hj|"Qt\JkV+ `W]:4JǕ -W.jD7JjAVK`JǕTjWAU+\\ko?ֈ@]X5ĮDZp%j=+Qt qE-)A kWEmW7o&xP4aIjRT 2 WOz2 WZvLjp%r]MRk!+QYZ}WTꦚp%źW"7@-2\V+{uZ +'ŝLGJ{G(ڭ"WNjZ ^Njbm *6֤ {<; 44gֳ ;т NJ|3lPU㛉\kkDflVYe;5,ԉZ[|(ITzlZ!rZp%5IYz4KǕmn=K_XpP8"WZp%j_:q%*mhZ!FAW"UxgPTRWF\X6(l_!v=DZp%jI+VYWw˿dv+6*y5G7r驴It ɟ6q񴻺>;CΡwW4mwa ?x{>]h2!Y1?L-qz]Gx\}tӚ;۞ڼf6޾O;ƫAÆeA(`|U _=́@xڨ7>ỹ? 5?μN q3_v⯻.YA<<]t?{׶Ǒ%ם3 vv^F/ jHMy7IQ\%bYb ̓q"*/RRώ59[eKbOt[e6[1 $1>~pz@svۋ|5_=ܟ;Vc%qLIYȵsEl6Bl%9öIݝJ.lc'R Shs%26'NB. 6#s?ٸ4VabͯO| _[Їi(.B@|{.Sl(V1gƄEָ~M;*EKU*#YnRFy(')>[ի7TK4HmőC&psC: [%%_܍)aO[Z8 -sH sC1Zc˕1Zr6>#mQ̜֯- ٶT140vڐ(̛" #Tk㲳 ,`ʐ?!KXh*ƈa4B1f/`}G |d hDI*_Wr&uU#NU^dNwŸ!:ICާOEvjew-G9Jj mh l<f{_MqM>{m9$5nj=39|XQM^B;v".$Y·>I vcTp,D}NRΗ Z2b?YoiվXVlZV!;3&(z;R;7gcPG"X Xufá8tGj‚:K+vo'0LgL߅ *()ˡ<#=Jvh A@kaFh_v8 Ḁ7C/ͩěE BBpl5 %_Xlģd] 2vC&Zn4giF pP\',5Д֭Yn( jPѮjQB-96=ydWc{Q+aM(uH#C}޻J ],@.E0 W؄d hᠸVߍvPt[-PF oDx,8h&`-EH*zA Av%=m {- CdܡaS`b 1k% ʄ@igL}>:\o-$"XY_uawLL\CʝCi688)1E 2XKy(u;@g4~~ :7XaS1ED]`)-A)^ufwB=A/:;: u488Kh1UUDI)bm&J%urY[%p`ȭ 6C0mP"1 bµ8b!z_P4yYgF$} <X.-5ZyOyLPB2$k(BR[!q:p _XPt]OU;1]l;dzeA4T2X ?&?//~bEMKq:7XqoBfȈ2ǡA]ڐK1X xC4*j]JX7 5s0 j`QUۚBp ]fcvl( B jA *5hIVՌ+VF/ֆQXy谜`5M6YPYSx!6Rny魙+ ."X ioU! 
d$ ྠGûQ$;73d62؀u ?7{p?xpzqo9Oge> G$` q4z6T{ (iSq ^;ZmQKǴuflѲFh Ƭ,c毖WCۮ*3.56% <%:`O~@rJC7v#a?ThM.WslwTP=`ǰ2]M]Z@z|bc(!= \ߺ]oal쫀'+M[nڰ $\FzCE7,˼BܴE#$q,Qӌ#xl Yz U GmJS\muLG*М;hc bJm^xUTAִQkk&=b2BF JVx]YO}~38p1y|7i" nff?Y 6(3 @PkxaAL BQ3_ n^2čvXv%p]{:5XO(8qyJiSܰ@bz p4LZ$FlnTWҘ&Dl0r;@5)Ԋ#)Ek7L\Х5K.?t=ϫ+ }#7> ՠ\kx/nxoJmaj,\uTt${rx s;ϭf>v?7hҧ}н_y{ eseo.?y \]|s9u7|R"{;0{~o%e&?Vn1.*Rdws/|ggr{Rug/~2^d+ҋׇo.<3ߞ$xok~Vś9W:;}x{'Ĥ_:!{|)~ cy~1bDZ_펷ڝw ]?Ľ?p ۿ< BNfK+fÂ&\v \GO4| \I4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJWՃWpi;+|:ȋ}jb*D \iJW4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4p:p5sR*q;+q3+MWz \=UJL+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4pubx8\oв:G W|kꓗO?[z4p/  \s՗+~4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJWՓ\z_&\M07sCa-w : YG.-n'6Z>6/͇=|ϖ޺yehCte76CWn[+=zt5Q:Qzt\ +6n&\ [NWed'HWdRfÛa7w fOvՏ>z wFNDZvW?0gXcЉO>sMqϛ\j;V.~:|sޞ@.X.αA}ݹ pqֿ7ݗ׵RGeIKF``AK |ϛl(ޞI7/O_ƿn!1'>+|wцGo'ʤf)UGo ]M$[VD^.|:].}廻VCYEAWtKo%v 8ՄVj>p( ]9o@vv-pD+N2"{;kO9̅(bv)Q^9C2_zط >ߗA#44p~q~@ԒL>Y{ٞ[qqJb4hЧ P=۰axφds{Ӫ1=`9t9WIz}"ܮ|*hH쉨9'OĻ'\RL~-JΓ:|YC*DWTFA+lUFAWPePm[vz ]\ Jzzt\~*uwE*h_{%Prw9GBT]UA ;+ìCtU,"tUPB?Bʂx^JWthd /hEKWW?]U nA0x`sj: .qh͉BiXt=]=ꁾ&زw S.|ʶ՛v `䝡׊ĭtUP*;+\'պZ@qQph#͑ M`34] MZ JҶlb Pr+xkf&dFG<`LyWՍR|zpx j#o28Og`4;Ǭ<7VoWiM a[2q鯊gwWit6tEQ[KlH=~>r͂oG#jJVngy4Wiְ9翑[3f]\-tԆ׷{$>;G4/¶L7)k\fWL.)i $;Ő\|yT* __o&n'_SWԼp12_>e}RTjRgFn8Dg@p')^-â?xΗyΞt }ВC+^rYfqyQFW83˲oEog lZ2 }MA_=<\tvE~U CTOR 뒎fgTqϼ~`UJk \nXr]1>emԀʊ*hVјTIܸI\*Խڠ>L>ˏo`<_5osu5<&w*,e{kZ(L՘XCIcv;cVZ- "J<( t~,^0%VfRyO8dY3e/?p?hAv'}ViɃ_x,wn ܬ"U^X~ڜWS=|nv<țBtTe*'Q~L.P8jG48+)tDbF5I3نWLIzrBQ8'A{="3aGm ~GM(u5!Y&qrO skT)0};yM^U]w}S[]M6)'76*)OgVܝlrVS[\ۓSRO9 _o?ukYi1V[(JIȀ$y,^kHBpgH(`dIdBIᒤ eS18*QU{ip6u6f%E0/{^9\ "Y7!:֝3 ,2-EE`_<⚑Ũry5x-|ؘu(l5s^,]mN \-x>ɬӇ;f,>D^[Ş3-3 (ILg?h#k5|-L)*saKUJ>\H@ ;x/X)'rE̖{pfV9L }\ &x3tܜ[nA?HF#]aN)OT~/l[l~Us[JXLLu{?~!6nر[^RFRIPW^M4 8ZTiX%ʺb.lt\׋wZh;owJ'eILEiޣ&}E,cH@XF ʱ~ ڲ2)To;γHE3'E+|xxt>YAWݸ"vg]rl h (u]ޝ=}tO4kRʊR.g}]gRx#y,a>?h#\d'Ew| WzRQMsYeݙ?D44o9o> CPȭOsdpG-WI|_u\?XicU *4TNbrtT ~ U˃tٺ ](HW)䰖;Ŕ@GPRf,Ȕ6Fx%g4Y͎HGP0 DEg},= fN[%M9`ʜ?sf)8][+N*xH/e2 *wf>ƴ.bzz"*@t=LXgN+pEgE-hmi{RҟnNۓRTx]'0ME[Ws~iԍa@#hpZ'y4)|QkƤ9& 78>}myF.!W3 P3,i*C_AD9*IޙeHE:+09lC`]@륅嘒1LA陑Z3((zo8Ln oegp53+ZƂd*a>nˀaC__cZe}܁1҇!/+T$N1spJF+\BoBQHZK^tdXBŦwh ΁> m=ʬ')eK'dƧ%̋u c1r1^$V* V=Bb^ftZR6JI'K5@&3h4&%SdeStY͢U{](WaTd־5jIe֙j$*odIQFmHr; *LJyX|lBAW~w0OH:KN bvڶu(M YLȥNKG̀,CwP$GDMf-4Z|ߵ\ƊM{؞hՄu)J*r #ÏD(tS γFDyc Xt$)YО uVƴ&WB90l^ch0n $9qdM`k6ٳ Y^Yǰ`NqJ>D  $r"=abFe&k ^kWz4Y,rPb/eB^w3ڲE|c-W#j m6Bt= _nWL1w9Z`[ԸpE>X»ڸxr}WNS'4X<*=aSȼ¦-lLF0sd&PXcyrhI3S6&ͼpgnm@Wp؊d1j/n& .l8zܵ ="ήiֶx˕s3{5ݑ^F`%&'"W9kb]ȓe.s+B=xgueVJB: l799 "8WA ʀCӇ6goY?vTeduok1|GqEY(Bix䩦aZ%O82KT?O *'i*̕ V^y[eʙc2}cl(հ|E&.iٻ6W+!~A*XGه]"1-=3 @$GUILLh.PHx3jg>LLj9(J1Z+IEltAd7ۤ쮠y\ d})s,j[X{Wc0ٙ)GW~Z;j;BB^P% S R@޿Btw^{?YyrL342Qh1 @iCs-6XEaX-KǞt}^E>"XCC J!p ` "9C` RJaNޞ6~1ٻv?5> b\灰4Bz=qJ;[c3Rw3HyCQ83H1Ty;z0K< K?+!}S{R+I HjL'T nR:LR!p=pH*/0O Dq^#!rh^b !xFL`^Gi&x[F MBd%@K^\;)p5UɻO7hrnOF\?MP@\-< X*#X48p7tJ ^k"J-5#0ivx&'߫{`|u" Vw{Ԍ)k^ab"J2uwGD@E^@/ !C2 :lx*yVtg=ח+3_)-plqo?nBnm"װYMӥ5YQ2dڨl:Mpwio_(cNn5= ]4881zWEs2-ve32&w+ڈgӏ@_cܣ4 y)KV`|IjƔ PՎiJT}1 M ǽ&\}0l\0=iևN5wLK;z6,P3f^o o3c2U&S0zd^s6lon3tRY ]d1,ͶYբ mRw 8jK$XYmXQ1:88{p"JK%}yg A"5 P4}c84A`b9ŞSCa@6a 2nAF*4.ѲH;"E RrHd 3N3K`@JA$7{A]130I~MzlaPγ`i뉅@?H)0i. 
u"c '(_}4=iU[&)_hXVAQ 4DpǜTaZ8ɱԖԭ m`҃#`1&h~K MTd`I|up~W[2$ʥ.ܩO_ *bY O)s6.eig_I]]~~+GP@>a짓!H|}ٿcF(uaƾ2uiK=%4.}W=DMl͐RTQg9h9] GNIW'a^L~˵C .6ٶKƅWXˠ TN/K1zutߪ6f@UWa~hFM 8ϟ@qp/ ^rqbPĎ/HUiFv{)-BpOtCSUԎVs0raZpŃa9A.Zkr܄M-Caw yf ,$ܟk$~l(򈼏oڿV9v0*q̓nZN#1!_T XCi%81_ yRsjK.ԫpԵEx>4s;אuvX7;z$HcDmty sZj)Z2li%% fH.=\UlN]%dݻD4b1E< i 9FpS@\ېv҆ڧӃ[QH>xsp' rߢ}$/hCɩyG x{ ڲ8i/h6W)8BBŀ, d#D,u`!wvӎnGپjvtf+ZvЪ欶;cetjăh-Ucf-1'T>bߌiFG."eg/jX=Qo(0OLОqN ]qD[)'\ 6 :@}!^Wi}f0NE{\ ai9^ ])v\)<죛Nrnu|w_@Ta2N>3EwT5^E:(j1]$GFC% Q^LK光_5|زB{ٛPO~η}mn&.F]w;ĻK; 5xmi=y>P_+.vzؠh~AAY8&}i512</٩ʇ<_ۏ2^Q斎X%mh6_5??,>LT}`'1VĶY p+_o='4*I_"T+vs2m5ЊKaJQVZAɠU̠͡Myi?|Mu|XނA]% ys>T?/"tB)f_C0cl'd,܏\p}z|~s^TrWYU@YKn+:tUr 7TNnV_>Hl &p*)!ukFJrTUV}WWJ%{uՕf?@l՝E4IDPOE]%j5wuT6Uhzu,ԕ֫C ՝fH<sWwv׮&uu7*/uTz9Q +J'ɨDD%'zp8{B*ٓQW@F򩨫D-{uzb_C\H˦x[ː Uo!%aNZ^m|a5G9%C*q?7њᨮyٽɗ%Tml8ayB3dZWPTּz0<d݅@鞍[߿910 .fdrH8+^cS^_ْN|Ar^& Ø24zϰE&IR:kCJ:B(cF9bT}xk|<`wmI"<HfWH1WHU'zp4e  \!hAVC+RWZ ŏD>rA! \!AR\@2(rLpkz +XxIAa=)zL &Rˍv7'vT컋᨜òvL}\$~W $w=`J`Ґ )PB d<;;btkK\n2I9pV\!.{q,9wo> onڨ% %VNbaE09,NȊ"Hb9xԱ1zD'u,P˟R7{VQyDp 5X #H8>+Ep$h ɕX :\!+MNM8H8g 4WX)vq\M&e "%ze`tQ-a;c7)-fE+2>.iew?ťT9B@ , M&Aj!jqt.y?T.9 Wo~W?fֱ JXYT85(Tsﵢ"DGJ0?5w5QBX|= Xir1 54s7} Z]9 @L~K l~77upyG _ TtoObrh@YAFW⼨J:GLn\- zުv .ݾ}۷okos{2M%PlkY?@Q}7HW?M -T7jh7Xa/Y-waAsTn|ý̀lмR|3> nniMcx_&='}뗫-蒢F//Ά1HCҼjb MMQTg69X'| \uVk֗CQ3k)=%)D6emQo59EP #&:s5&k.wڕCwqF 81@3#(\n`Z,RcrGm`*Sfs%gx&؛ĬA~un-+.x ?]WJiM'Ke W`WwȜ~ٶ\zt6$`'`x) !xKQesT eVSKYo^pGIέ"sb:eipa6<1 əy*Eu 00 }'XLw}op%P%.U Bw{Z JfnѼA ?l<&~Lx͠b`l8ܰ^I'a E"gcF0!tȨc$'ixFe$dKkO ~U&iizI[OB^Xodv[ (j۳ 8;ْ7v7rާm4y:S7<>8r`)8QxTf_w &} Sg0 nPer=n}!.DU2"o i/5MށNRO 0ٸ&epe⦣IXY8 ֱ@3,P`ԜrV~sL@ZO viO**^B}Y&:.>橊Kzmbn6Xv6ĉŶ.<r4'^c.fJt1vB uC 5/>1VQR%l\6N>JS7M?:]9'o1vVN }0'gPYlLW;Bp`rMaL3W;z_< ;hҸgsA/?>[p1*1弿B9]z9K>ўՂw,l %\~^|V/UPUĂ`Mk0juV"`{&dU,޻f -׭JTh6GxS:MUd5!]U}}QM6oc7܍ NFߔtX }$i>R^bz6 |bpLL &иPԸ+ F:gAuI ~"'LPa^x3hަGTe&@7zymo^Xu3RYqꑈ6(c45g_tW?9oN%dZUn?dǮh,KJϢ.ͱhI0c X(60Gb\DͻnϰI{{IV>Z38ڰ_Qq4EexOs2ZH"S. 
UY: ]Ô(4`iR ,.4L rs.IE|ԖhI-&_Gs+A{ P)0 W=z۽aYnft]nqG$:&7`0IN#9듌1gbx Å" f%iR9[b`Z%9,&$Pp׿MH!)$@ kx*CV<7%久u0B+b]>2~snR 4U&2x\2rg@I5)yuJPkx1FdBI'T< Wvlh~d2ZGe>4mf>,Yn>\WkS.XVo+~ i|m(6JkȅuBYd\ᕐȤ9ek'NWQyy^{:2 Z.@ j*hLG){)"T9xDsJAh, 3hc-Qy "djR7e;5raw ⧺ Qj pzExA~Ix[h a;4T'y#K魣6Z0N` /#γdU5dO&&jnD/"&ƑoL"FsB{2,QgLVІS'yLʚ#9 6hEr8~ZG/R³~_aqՓyj0r4H0vŨ/,6!rhy9 #-Ko$A099oij7Ϻmfi#72cl/,8!=ɘmx.aZ5J<S+eYQ2 NٻFrWTy=gxoSJ6!9S xdmU{,KKej{3(t7~:{BZkua-r|n=&v헯4?wqk_4vN ֒?n2t-쬾?{z۫_ߧWߦ[==QqmV^#*؞4߾&B/O/~`zq ªak Uy$ ]>\V12=Rd (8x)muQ1IƎ VY)Q0yDxcq+2%!@A gqDɳmh% 6e)zm8r[b8JDBC[^s#Nx7kFT(|EiIR .\^UU^A./Re>B>+ߴ3x|mn=oHҩM"YZvyj)-t cꔎ "Q!hVJ[gaiԀhT#ںx*f@HQ5gV£s1HX7 &W졻  t:hC(G_y}z.OYPg1B#ALu{yӽd%Nse7 -ޒ4#؁\'t(eHJ@@604;KDbF&Ud3bZ,1 ' 1*޷=P[uf*-}̄Eg4([ V!T=MgaYt"BcL'r"׳??5??^T WRksz3'S;NCVH!1Z˫$ ٔq%-8eBsTIZy3vYoӰB]Kʥ%1 kjo::]t;nc#Ů9ܙ|gS~ļꯊLkgt}Ahb]\2`I1h7i/ox_P0ҜYu[(o &ڍJi i'./|b.M/g-Y ߧ-pLѢ.]T;$a숲hm\mPmրK*bXRD>X* Q:1*FkT¬%٤miUtDHdMQ^e u cd9*)R,YmUl&v!-\N'JYM![2WWD밋+1ADم&lk UHPB"SY U 8 s m49:'|jkJ f}a)ͺ!V_t7'|3=ً~qӛُu˖#Zセdd7=J\;~GfۦW[޲Sm-6&r2 6uAm@FjH(`V =)*jw%2{Q,%_k~s Qe .GJQJXIy3*|a3/\ITKE[-ǻbӒsᇿշˋ/9R +>c1K2D` 3][eKAdbYu~ 5ìQiUݦ.YS!±6(Hη>J=vs(^v kkvg?CPYh$FkR:/1% y pk=KSFGȌ %p4 1O#Fc.&A598>Iq(~G#qw'O RT:$ńA:rrR@Z@:Y-JЭɀX'uvJ31,Y4TwR0tqRemZsW/λq )LK_4_Z&Y{)ɫV, I(raLFY8PlJ!Pa3?soSF> F cBHBAZKPQTih,2B$'נ>yr$ Ў!̎FÅ0qLC8n>jgJ{莮[zx>SJ#Ó-=<cz'd8ǜ(Qg eiA)Ʈ(Rh t{4c XMs9?]V72x4xӳ0[cvlYJHRSx BY 6Й.`DGf#3=~yFpSrHdFiAJ <'a2 e r"!|"ڷ=;, ϭmkk$v|_ {}Yc__zY#0湺f}Z|~>K~M 2uf:G+UߞW` >~!狤x>ߓ0BWsc|,];{7N猐s \\ٓ8F_&+:ǛE.SAkcٛɿ|'/"nŸߗ6KwR|pOıeұ__n6f.MۖiU)R׭l<nWz'NWl 7Uk{:G;RQ~extut{z~#{f?!xO4'_8=a浑avuc۫;vtW?Oa+ҽ#eAK~]h2ʷ?[4J03N,; +;W wa=TZpuSg2X6ؠ0.N:N"e"U!؜+$@ߖM)T!QYl,bim`Q([]Z m%x~yH<{l|~k_ Vsg  W0Թ80`Y-&xu][~-\_ cK.{hT]E%ECAZ 6 64 YݱXoMl/t3W%"ewEt1G hXRmWbt4FC1[-v& LJ2CAIWF2q،R,1l VAּ6U;Ac:Hi/.4J))Qp].j~DYip(i'n0 - j ~L> &uVϥ&^jNX%,ӄ/Bj 烕օqLmKmmYĜv&/EacHϮ&XOqqfF hj &z/SpDCIBX˦*e}>{Lr9PS3qk cjbB6n!Pa*H`ر"1X_ J?]2j}1/y 1kK:$0+x09H`JrI!XX |U8nM_=qGJf7CiJLAjJQkԔbmI 5/+=3&掔 Z ݞe9Ljk/O٧ :wNFv-]ҁc넑fD<\%}8v hQ -Ya(HD6hdRT|dEUQ-ֆh~pc'jpW׳K!~f;'y<<ܜ/Ƭͣ(7|:{&Uɳ>#VfeQ(-$L) :ا M>bU]+i@O$B$cIBAR&YqSphn8[8kHv'K(2'k'k_##.FcEb,YJ@`Q~:XTqm8,v,m oRfrW,p2婸*ׂ*3zMߘj 'ﵱL6c*w}3=0m MHrq /L1BO"bh𙖟1T?("sYQ@SBU`mOMWqZ?t7]c_7Uk-\\~Y}2iWW_J~w^ &WS%}~uIXo-YM( LG{ O7עO;z-|=BV1?^\_-w{ح4UNǿ)ޠHB ECWQ1 `Nhcœj[=.jRx\mjkr8FK ]RLpj+Lޠ<%teVbq2XZmK k]wנN]N]Uq_!S]UiSU0o]y +؄ө?bq+xFr芥 BM+p.[qyuV_wm#Ii0w77w~3~u)Ŕ$WMRd,S0@[l5Obu]:*%^#wc[ozY^VwۥRJSr=?Gy腩M,ӿ~./lߟiMϿ8۩=oXMkW^ʋȍNަD,O.FHL2&.BL.RP C80(dվ/?("m(>ji'A`L!+<Wx%04lk] B+r=+`OvBWHIXrz`M`⁳$!$z/S*-qc( Dh,@{ЂO[1 B2*Z驎7縥ކC{ B#̖Zʪ {?%Ic:a~=㑑GW]۫[tUghH=v8EYd W\g,>U&8/p]I)/{DQK'U8[#B%γA\$z$ZMpPHWV@'q`oΧD$IK eLEaQ!ECVvfRRƹt["}V6 HJ Sk y/JXϐ<k >xQd幨ف*X=%WBAZ~8>Lg](z;{*`:òv<ݔ!ՊN:Σr?ie^v_^ṔZZPVyDh)po{QG#Pm)ɭ5 ź@QWfеMi;og$Zh2ͧ@^B9c|Py2'}UYHgmwV`C{^֜v v=Oxaɰ0n/T`+^OÎa^ݥYJ"FTu:v8JPmt/|/0 ϙ+8++W~!(.♴NJIE07%:RZ NX]TF䄷BC"FFrGfZs&)pSjAo'.m8P'^_ëfרjw|6XnrK'^gU_yPB 4rTY&(P! Wi(5`Sfe+ym>Mm`n&2x)]{+7\YXVBJCĬ$BB g\(Q4+f.1T8-KTE@A0͈qZ{ (2 @hЀX_I}L0﬊ ;ӿ@zzj>|%ּdytUo&b=}N=չ\6h*"]vi.YB6QQA#D#dYn÷!%>bP0l9 G; c;4PKz>Al~JN*cH>\>Us)쮃ܞuMP^- Y*AXN +4ͩhR2xiL)1Ԟz~өzikJjoRΤX&4zTGFϔׄȵ br <9!1:0m<1 땔3fPOW])*^d`lʂQKXʓ\KթT.K{*G?u&G}MI>4%v|^d̸L2!Ǜ$T͘=zG˟Cgl/v1)hZ~i&"IM7l]}=>mPWx@Cj7Rf*wˈZXwlVVpOc 6m)"LPq^ 6oc>.FLLUwF aœe_ʊQчcYMD=n?xJ ʶZ&WB4YϴTxE=xj) )Ӝ]6Z>x[zqWGR_;Wd=C?}S.CDL!'#%,38H 𠄧L. 
̤~^.Li?խݛlnHW:S(zéJ9CҐᕒ8hfMQыwעv0N@& Œ-Pm|Nqmu3,Τ==wؘauDTO]3LuLu=Uhh)1Ciha2D<\h8A rs.AE`2G))ZEķԦGc/3摌CGJ:tUv]\^x[oǻ^˲~e;CAys 'I|XXFP4\JN#O)E<8#{bETq^:"oiLkS0]q5T/dFSqGÍ G;@ ix{~ Jhr'ߔެ;J,(+b]>2>9A3WXQMCB˘K&YW0M/u4hHi9H K@@)NpH|a{Ep‚T[χFGH#C$1I%paX8;Zꙍ8xVqWnv1)wv-y"DHIY?tbv' >>^e7θ ~U0IE5(mR= Zl (.zdy Epu=%y7\өNNE](w6_k43az덝񷂶up0Cq "I,ʤ\#; zFi+%dN6E{ B0 )k%**@ZЊ1""pLy |GQ1zL>㼯ȹ˷\\{ K:\c8'/au+F6j@%J]_E}60Cd]򸳮sql f@ rdDKaA!xRCbSSd=7Y]=>D#2߷՛|E_/Yz:26 )9CO_N Z'fV!%Ēem~!]`oY j/-;,P^[.W<9yn k+v'PL//Ηtp5- i,UqAu!v,W:`KM'ڼF7Y4~&Q[׽ڹ[=gd{ZʟrN^6@kjVoΊ`ЯۇGB98 9zWBD ι/.";\ kf5+ΤP&ިuM> #xz1"|4X ß6*b"AdTS9oKh=g7כH>Kv wQWr%&7ht{ hSg2^8ֿY w/Y mA8Ca29O+tnjAچIgq_bsʻIwt6Uq짲eP^9]Dc) ό+@SE&V:ϸ-s&.?90(M:0q)$0*1*oH$99ppJ&fZ>=Cegj{8_!M@dl\{#XgYԒTbU _DP"9cN825|zP5P t=Rv>"[7|~W٠ WWwWWd).H1zﭶIX"l|)^gz4*:KխUNU^TRIMe,'>7VR"DlQS똱;0cu21{V#k ,rq/32-1ZZ\)9I8Դ+} dr>QƆH2)CdE'UH̅EYKcHb:C":<̇Q>VFl~ˈ1#Ȉ##7urj^w9KL (1BF]d1+BJ_*M )vʈ;dJjP٢31䰘bU%Il L2StaF|PGf|͒cyt̋zőݮ-"f4)Aow F@oq9JRc4`uC:v1LEa= s<KaDJ&cIҫFVKh36Ua5d(F*txxQIH$Cj#:7H=FkDUDSh1gRr;vŞ \}\3;l[eV 7j |r>KrY'ͯ/&MMA6I:J5V'vq} з>(.l;L˫걬NI"ͩ\fC-mҞwJW$~+e{ Â?Mr`MVzQk][[j%l rP6K%zzQ/&mɖoKdhuI_X[ai]A˽)@D;e ѝSrG ey,;BOi鶙ʛf滾qܮ/TbCCj[7ߞnX?pUƋdOTLuMb#k"5Nzi+Ok0D6Ӷb_a4f%hӟs!ދ%G>BWM?}n4_yX} t.ZnXۙ~]L~X~7vdyՕ-j}uYήk[z:W5oUƗF3{>Kw'l^Lh6.M5ԛ7b?F~cqAKR_e}.MiT))M@[~LT;=/"{WE̋GЂ2hʂS[E,Tt@CRۜ5=k>ݓEQڢ'7U\[xRQȚ6t}vpdq*[2m̮hS`CK[oU݅U@12׶ז^ SWɜ7bf1Y)Ʀ"To56}/1Q~S?>dtd̎:fG}0;GlL"j`PwJGIu"Iq^ӂY6ta:/?|'k~v!t:ۨE&4& bjgP3({t%,LRB]b@R @T!*ݞ @i-|3mnmV٣0(&.^/U{g_Ԭ0Z*gdr4іFdC m4|@RĘ! e^n $/t%C,xS BQ\:|dU%kɨ@&:Jgp^Y}Zy65ƺybۮ-X@Kٹe]ꮮ7ˀZM֩&TA6p ̮t}޲|J v^h;]_:9W-#+@@ U@ОHjMZ I5꬘ѲTjSB$hEj.9"pv:,EB+8{sezE4]FV%#$nin]Jsuys*{΄ͭJi9\ U%)hD1 `9D@(k WӵBߞvR[KMBK*:OqQ)] Cb֋=SugOɧO+ŝJ. ;`3!XDqUq^=j^V MJ4k ݥP׆z: & $\ IK:3:iG*}dpBP5g^?uNf:o>15X(,D:zPtJ"c6DY$j+)øzf%qQb;pctR[4O`~w|wu#7mS=6"`hG*+N0a݀on@QZtpG6,1`J 80Z0U%FWAhِsc} qtmtذcm$չЫ$i1$e?'Jېq"t@ttN$oCpJ(|CKUtp9\X];s]=],DRڐ Xt ȾXSW#4y\,OXcl>Sb>fU%MIBz0YRY7:Ec{%޲D;ʿNjotu7٢/u8: GuTck+q 엻=3:NP:L:+Pr1Z}Y(s֝a:i]`!`d)6%p3+%;% >6&EZM=t-5*eI.L@L?>̦eLOsYnlL@Q/mB%]\^vO:!D^͏n$иLˮdk,fi=ͳG'Ù,={);+~iF~[!uX&g6'777UHV-JUĿͳ`JP(l+`tB C@G 0c@Jٌ(bƇ\  DY}'ٝi ׃OGe Jd``L3kiFhֻv44C6 "O^OCb~0tEhľD7ҕDW >Õz(thQ XԐAa0tEp=+Fưs+ko6~0tp }XT8s+g9ƀ~Q(bt(:GNA3Hp ]Zizj'JA_]=^a=1]WǘVa(3(+9ձCO~k/p+bF ҃ Jz)]`i`z?">喇m+%o1 >d=sB o=;WP*`DW Xõz(th}}8R 9ҕ 5n(th]9ҕ zH.g,@0C+FDQ⨮ΑxㆤE?b_tQth;]Jj3+/C?"v@l3Z{u(ݸЕs w|rWapwCN}}+u]z!f@tE+p(tEhP}+FHWgHWR>F9S0b8uhO0F:CR˜ggQU Y+4F4v0#S9OpOӄRHgI Փ:ۄzRj#4mg[sH骧WܪW߁N|Ds*j wOoef>!?)ݪJИB*7fq!Lm '6M#s}矽gQ=7A:>?+ hX_[Їj(&B@Y,p6yCюzs&LEK)je"%цsPU,QVɖVIum0v4GYhStB7o]BRVa85s3u@()ס]הR#%IEֿy͌exj1Z|whTUiԒϏ}/TF1S"sYGo3C-m*0*40vp# 9eܻMCu(.e`0 ;kXjBcW91F<]UX~b\1e?ņoYWT|^;dCU,F:d %SR(U-рOBr>}=oBмA޴\s9 Sfݰj$7.(]cpKNqI>I5j'-ؓ9fqu4~1`یU9 ҙܒ`/DmT/ *Z}ƥ/-@͂~|gEgj#rsHF 9yR ;7g ,*R>Y jA 1 ՞utV@N`Nڨ/ ~ E*Us`#L$X EA=A@EE; O#4 h;oPR6,ZJ2TjO%>,d0)/VA-2 YeV\ZnCfc[ 3泴R#u˶ p _g*ДYn(E iTkt t{zèl#Z}__8, v`[]Ye;ՠR5rwU #÷V(`_a:\e#I:- d>QAQi+J_@ezm|#c9MF39^]V۞dWNc@7CAw^KCp2Pư)0mߺ 1K@ JAU.|X)𭥘ƒ=˗YtawTL@TCJB QCK@T`CA.:MC{٬AὩ ,N)(J892z,XBEgTy'T),d@APS*0#I|A+AYr|'q/ J*H{m>EFAyǒj0 !1T(`5Zѡp`Yl,@+ Db$A db=WVcG=D & K_(z/hOXPG%4' ǎ1*Tei58!a%wsƣvi2T/jÚS~{ΗďI [Ů|`;w FfNc{T^> A/0JhIWo#f B]e \ìc,Ohv=!̃u ]PAo%CJ$JDOkk*vCQ0oٮf1 +Mkj] $\FC_ o Uް b^8R. 
=>z:߼`{4u-3\c* Ol_͊%.B 'w)T3X2ZfjU B1 }Gpv&S '< .!2Nݴ*$mj@ D'[{wv\WU !/\DdJ[ڍ#T'BA1 |GPNaܜ^q7 y"z'.luUQLBq+#8G38bQɻjhY֓ 7swIjE^2z}0l13 ʽԂuN<Ϯ8ty 7m-\xi݇`1C~GY" 򷕲wK0:K6ÂXRh%OaKxw*-)NbbK+E6RJѴ3I5+E!R$t.rr3+Ў)n6mz }=4TK@ٸ9Bu 1zv=fr ~I{][UoEꃝI$;2qac8BJt5$>g c~ ǫ I r F`<}>Cv:~Lc16fF@Vל f|g/v<̾83t|(iʎOrò>Xʾ6XI)שQv{tnJNxə,&yS5_?1hZfˣ/;3Fɣ,3J6WnDԣC$9>LF%ʹxfRB>1~2_js_4hn&}IỦ=83Pf6⽸81`U3,&\iq(r)7.P.A|M~F/5zS=pGfm4[_4h S4D 3";.Zi߾qQNs9^ atSO"rMYc$X*eñ?/dW+@6_VmY_VΗ^/rm՗^BƈW\!:r^r(sYz .RӣO6/]_k8!-BE{]T|~.uC7B"wK^1"\zM$w!b6,7N\pz'ʅL,r83L^@1S7tptuv0:]2##/ k?B 0֌j?}8B,ex{9}Ų8-n"}\_EuI&jMFXj_][(Roc ]U9jT17^spPl6W'jQ yx[wWOLa)Z_=0^5C-A5 gB5\q][7F?-/!pjj1I`ւAB-~\yn.{4%HgtпE&jfB..JQ}M]uSZ6zoIpy}6tEX5u< -9RL]iŹx˹kYOZ7NH*Öa wam-pqf+Mfaj[kIq5&l*<8 na66{S],V$oƵt?|9gŇ4hb\ծegn2sJ wimok?ׂѱN@kQr;wv{|qv-"cBct,Zf8swRj+zau߿3;@xUp愯uO^wy2x 9kA`jwA _9q9@PN`^dLhph5ХVph88 `Zf(a./t: $=Z뤒vYѹK!%u$mds)ɰnùIJn&)9)kbm{/DfDu$ŢՊBocj)B:k=/P(TA]?tĂSP)}:+,;m ф Չv֧\E\r)YtRҵ{2!k0.XaEBǽ43Dv^d01ˎ;`J3BίqlJ`I>~1!Hpz_8*͉v/iE[WbE!N9EC̋t|hG V]s [ƝR^Ǿ)XyǙ ~e^8PЋ&iܱ{zh ΨzIcav1/]Ḣ}v}L^[j9{wyiV[WB/Sx^)G]i {njv|>@[}a~h .Ya[\^#Zn\tGzF@ :Y1ȬFr`Й2LI_ `Xu5[9m4\k.zBz`1< A=ԍ,6I D! JrZ﾿7G~vWxU_Fv-f 駙{ L |(ch&$f\g m14IņHèi1UdT0IOdޫrd-,EX9z]8KٱLՃI` Bjc_|d1OE'#ثOδU=aJ'ɵWh>.d6>5R#EOL q=)h2ꯅKh\?r3R*mbx B&l8BqL#\Li*ߕܯW8ڭfk1_S@D ~LL!vlIƈ_^+ ez&1^3ܱuFK,r X8fZ t2/SD;Lr2@KaŅl8'~<}JuУ4~:Il+/$moE*\ M& `a{'zC$1 ;j$#)QAR%L4O3h` p%jIyRdP-fU5:N%<1 H38,LSl3k _Dm<`1LP}Z` 'X.9PRɤY ")ߌI@b5v:8d!`7ѭ|qd̟#Jx.G\ S8E|Kf;w?þ<{#/zF'(t΃w7p&DbB((^to&oR|':G{}8K)sp??>PnZ#@S"9 N?zvט FcMˊ=v:w]Nmf8e$&É,c#5!N2M'6p9o:EJMIPjv6U9g `9:? )̮wKmf^(ᏻtfhDeI6% ckT*u_d2;&}a &HQ-JA@)(VI^fE#eݽ ;N(P%.ST;")ɢbew! /L kOb!s1-b=oP9;Yz̀u~^K)!wZrh$SzԎQfyԻqȮF};,Av 6=kT T2J2YߵVRG)D-E}Tp`r7!bͻx"T$6Ihx2P/ x&aRzm͍J)\! 8QT/A mr5–x,TQ*=d3ABV(ȃtx}Yj9K $Ujc^Fh%4MaDah-pi޳`}p8}I+{K A,@orQ; O#l16IӐ} A+d+S9}=y˳| zP7iF)j0 (jDiO-Z;[&.?^GLWSq9K&b#&LBwYXOK}BS2Ε2T5kn M۷(oR1EOL%Ga.?|-9G٣uכd*dv}JRt\q"+-=6~/ϧ'gfr:߉9FROyG171՟/|b41&r7:/~`M+Pc5CUP^樽f'k鏕&NimP׌<{6%Z79]d?G)zxp{ZdXg|%1oNz *"bAi¿0 9)}ϥrǗ~CUI={DK%gguMŬF.=ݼr)} 3["Gt/u+ QRz1p6hVHKq/giPXS.㜘L036ψ-2;kTc^8+)Ɠ 5ʈbG>'CS=Gm9?ZCN@I БfO9͊J +T}y$'qoYpaԛɿLO_]+w Ա>AOЮ,MsIlLL^ b5xm!(uV8^?coK2>7BIHRg)i[n0*9a@sTI-:ϵ9K:!Y( 6#RM),,GQ˂U@5æ)TVѧS4>9e)&iHTW zr$ER y޺r( g(%PI#tOjЙZ=<+>|4y,wΣ4PŰ!K/y>umAbg!4]Y/>fD.sp&Tϡ|O|MQ7/~_P 61D赬frXY!7"tBNK_~lMӮ7p Oj@[Ó=KqRTR&YuRYOqlTGC!B< vs=پS| É_Vgqgut~6}ƶDINչn*i"Z%9/K 7I縜<; EXݙٮ1x}Ϙi-E*Dl?>oq`&ь|`o:F( COObRQ-?F~//r_F=">3܏>t\>u-y}G J!n05K#5BhI5U@e_l~ ٔ\[K̴ URNo] I_x^"W9}|ZvϿg,`L  g:95u"[阴e;d0-e$vtl=ox7O>-4U_M3Lq 䕄*}v͕'T PX\!x]$i->ߐI)3qhKL ,!A\mqK[E/[.CP59QJXˍEI BXŤֳ\/ rr% ~8_JC 2֚wk5~&mz3k&xM|9 _嘓 j~9r?X@6ڈEЊKxPz)3R>m]ȭR>kuzTFs.LFn8!*IT&Y͂A!`TwWY)cԮ'À%f iȡz!f^Sy^y q |gG,6L8cW(a8F=MxT_Yb(^p`H}KhKP7F JfX#>ȡ6W:W0c5d 2Z)+{.FC~hIF|0vK;nq7Юic\`Lvx #Dм[FvjF(%:Kݜ;O~8,ӈ*L'N;8 adـY :{dRL2s“,>NqI1*NN!̌*Wj".ZdfJ1p3hlʬ%ܣ @ Ebzz2sZ&3` K'-iwW^KBHm`r=iOYˀ2jd*p`BP6pg q[IMj'ΑܛǺhR6kRVGPdН{:)uSD̘CyKrBq M'T$nVJ+.OI^(H+$v k#']OWÕkrBꆠ.Ǔ*0SQ$!(} By˂ \h>BSQp!F[ɠ0XCuY%e-R^e@jxn_uf !o_vRn .8n 48r`AIn骂K0.$ L ۱T8BV8TJ\rXV Q[rOjp \mPk$aI֐D/&ߝM?ߞ䣡nY\h=,-s Z(lM zBp ot@3!Q2ZrdvK+F .h>9 $B-uAyR0A`w9wvج=,+5 }(ܚumͦ~3tErK r pD2{}0hy଀S"w%jj'4jN(ֿT PZ ){U9DHSq&DJzCe L8&FJ0^Z&rA3@։ $ZpoK8jtrQAa+%#t!޳ɩmk*q"sM3Å=J5MdO7eu'Q\VֽjlP&{D uFl,ZhWi'c=9+r%%H\@hMËDjsXք<3t!&wE`oEk_>κrhmn>@ bC*¸i b"x<}朢`t]WRci*yFA-%-%#Fa02SGmͪPiqΊFp)!%/ [hojx5 Rю|)xI.SIi@rx;1& 1`@c덖l1ʩ`fu݇ |rog_-8';;G>Bb)rN8Y ʡ_JyU!{/WZA`t+gWHmWxwv?O^MDz"ge}z 'FS X)/% |BwVPS){EZ/'"ڜ'؇d'\y|*]?~cI8!ֵME$_hp`S]U7-HOok"X SD*vdvm}\"cre'}ҼeN~ݤ2@`$mx_#>قNg' 7yLޗw2XCQ>1F>;;XydpU>$Dİ^yܥWKK\5#cx 2jeGcC3S{_] dfӃsJ5 }u!ޛ,6 rvw[ʍ~Ov~?ߖ0-H 쭫#V caXW88Nc^qq9ʝ\Hә 
}{ w9';<h!2h[㵆@C,v \(*ה90og4Zv})J]6ә)lXlevچ4fTWzYwVȅǏE|>u '};Zp "T=*8z=bLzY q:C|8T X8ݖ)P^Ż . \rVӖm}/ޙΪVZ&?bdHHRބƄE*GޱNzh v-"g6/}P# [n@&ֻq7UtMc.MXӥ=-% o؏!$vsYvn`` r2KA+=7+W_t@R-Ɛn?㝔0Y_ytVےDSki #!2.=3R|MKn g!Աpkh45Fz.5<<j`JZ8xW )hԐ(,%!BI0cJPKa8N$,+P9+l tY1(EmZyp>A f7H 2`\MxAE|)"FInPoRjp߁@/gf oZcl4i:VF W$O?˳z {<Q+@e' @Pݛ}< U6ygUV𱲚>]GӌYG X}xrzUoJg֬6& TyIEyCi %ZӣAjNٛO5DZnzW\ZDHb鞎&7KoGյY0odiXSu4Yp2d:-"\[TTJ+V*1k_|>y|w|!,uh[fڇSEBERM jC"Ae:! fO[_3N~yRkLߺ\hOCbdW%=KxwuSlySߺ#{+=1%eYCНJnwJ9֕+8]ǘk}`v n#kptbsg }'ֳg^.XPPђ4)^Gi-ZYN<9 kZnF y:Hʶy~41GPeR 0AL;aDAH5}rAxIJk&k7+Z/ҭtAHHB*Z"b1az= RQi jki2w ; MXS%)!P׵h*N{l7q_n3ec(8iEl⤽m[.~2%15au"D`QRgXS9/mQ\pHگ7/,Ry[)꥟Zd2ֈIOva?TEE-toj vKǴRula΁FHeE7`dtO8M=;[)/1og)'/#?}!_:_krEU91Fh"i|0n-#`T@L4e&+eJF (!TGs}TCT 1oTs{ڜh9 rS oR)tFJ J DJ!^)G2TUЪ1QRM.+T%2K?m"|w}ez |vI?U.uի\uӨOUp?v2~qy@y{e$w.PnDu! ͇?Nym,[ȊZ^ZWuR)r!{4R aY@qQp'h,ꢔ K ~)G^M0"%V8ivha2iaƒ4ߤF"@:$ EQ`Aj2 FVFc%IdO4AUQvXB `[V,\^ɒW3Zj9EXYR%)s,kza\+DbY2(R6l"@U ]tx^_1}*+/۩vA*~ȗ!(N8{# U-`l)? /~[ F,-_ ӇQNQN2MͷGח-hN&b4ź#13$]3I' B onpC%&Q"(ʸ6Boɼ? ރ&GPb+#jɘ1kHdPnsĄiđΦ8iGr)Y%Oh‘3[bNtpC\{pPl#)HJƒkbih*`l,vꆉ>li]R RswiRod``c*)Ps\.5X!Cٞq6+~). 6RSgb¨J\2+͋Tj-{Jl5<͵d\Yf2g[LYjG+t+M7hbXFOd@ō\<>&&ϗY.,@80"%0̆D R*RQ ƭ8𖺨}Q%ԄvE˧]F~5\lVAfK;9˯K@,x(")8]zl0YAOv|rpb:k%U~w{dZ"_;FjG1]߉6@$'IVlϟފxABG!etHgPTaJTy7ZpކJ{.L~ƛ BSI,Qi*$gb̳.8-)꣮a߲k x,=Za{Ct0A3e ގ9t kL _uH?Htq;ߙf;^ =4=c;d.e[MS P\K![?-F?QRJ+&InY֛hMp'7??Ǽ ֿKƨiU 󍪭q+E}zH̋< p.z*@P TCMR Hr=xq6KU2,Ց|I ҘZw!5m֍s^cǁ;EOywMQ B\ iTJB/5RI!#XG5!Bc㦿8 :'|~DBi_Ag #>zgY؎áXl Ke( !?}tΟJ1YU2?_m@ÐBzH3 1y^CʗJ捦|A|R򽳯IϾ !F/ fżfLQ27 4>Kfj-R_^ה}?m_vk[h/m #S㙓;. Fg At{m\211!@(ϐ(xcT9eRisgעͫnN_r6Ė}8W[xOF}^3O'.R/yA,1`\Im8A) 9Bq{FK|eChFs%hHp7є4r_F 5;Rt,aj~QܸWM\] ӣ)3xPǿZrIQJ }#ˮOkŁQ#hˋ$!ߙ⯓swnWxpsϧ CtiQ36bi~T/=',E-EU<?3T\MYc)G 51Gl_R4LQm$aKܺetA)<ʀs:_L[.Հ%f239&DZ]z_RjCrWsa 5aT{K ~b&b3UNL4PmJM*eԘmd <EsW1*<1ߖx9^M￿;` P8HI/4 qJ=ե SI؀SHDadևKD}7ryB3SM- jK /DMs6 X+)P+Q#ےTL;4ef>dsWPoԗ: ZOnɞԞlc1}G#uoP## @\* BUG%"=W)JO[+45&qQ=T^MnWsd:sB)CEg8GohSJ\=koGEЗvw8Ĺvo䲟PpIJR %pX=SWWUC"KgF2HZ4I2L>ح Ķd_̴&ݺ&B oJY#::MCPV _j87c0EHiN1V4SE9MZ4gȀ*"H`6(n^'kt&08N[ G镍PcsDﳺzi AE 2n\oQ7:IMFRI3DGR^фYa@-ZL7H3w ux@ZxFsUNEP 4KxE˦Ѭ:%~4Cqm)1h&A4hѐnJ1*B1<ηc[Z5om)R/)ƬNH2%3h@vqM91M2nt T۔r+j0(Noc4H 01vir%y7=K!_5](kъ-u*=s=M?Z@D.謤O䅈J#TPQ,~u Ûi}ŵWg5Woo*0_/:hw0MoۈD߆Zs GqtcmˡzkZhzFbUKA[R>eZ1>z] H4Q*:<*&F[}3봤)y$ԑu<$.1fgGCCk)nP"PSt|v=M&rA&r5CGDi쬧ag@(ک TY+x+ H^C>g%(cl DiڠIBfPyQGroWq-Kw2 \R_*(0kC)ћxDSFFH)M@M DHTyÐ7k=_:ݹqO(DF ("TՈnt٩fCId `TBD;~fs""Uq?vKo/]*1}_gYcGÛw6P/g0ӎs3vvUbM2%O܆ٽc/}s3oAghaX/ܶf$=:xz#UV^SiFFA 2kP#uz @tm)s_~?J@OPzx8\_qu`l4Ğ- x||.ٯww߷.cǟ}u~enYz[ O%~r}xAw,o!?<#עS !+<`-y`[KOzjC.Y9?9|"&S-4`flbvM1Y~Ev|w5{/_< q3; ޝ_|],^}m|QrxPx X s}ɭOUm`􉆡niر='iX=gHmM_A#c X^sm]ArZZh# 291UvWn=xVRD-Mw_Nif _7?YZytVe ~N|Fщw?jGn&ezPnqN7-uS%obӞJm!p{`*/uymU%wATrAY/,gEZۻS/boD`QW:>6 d>)@hYT > >m!pΎ2WI+#Se7'r;uBWR.?-0bȮN .yBxl&ppCb>@|EP>OnF׭P ~폝dF@ JPZZ92w-PҊRqp2 :!dˇW7mNk {}7 \BcLi_iOLQ_Ea0BUZVY!0MRMԻ'_TRo!:2X1]]38wrQae0'$c>/གK`CbG_wt(gAE!)T9oIQj;WQcr ;sC1Z<6v"pWދP]M[dkZ v H<㝜QNU7uz#vXFoï; 'x>?| G.cP|8B%DsFSC5YA{Mg #+yp -xd>ޟTkM3r2Df5ӕq^T4B_zRmy#5@ý z o,u!RIBe#@ld 'j;tiXcow)pPf}m>= G)k&qMH%0&I'ӇANn8y2,].Xߠ.r>rlMQ&HӍlx{z폚t6k|ui+KZZi=)kfw$mhр{S}r mqc1t#̫ =b7?^B"̘Q*NB7AA4Ij;}osZs%p}^zJOeO:{:լ;Ygn*k?'pΟU }8-tx(e]tGɷǠ$03fY9ȸanZ'ݵ(=XZFO =XTvgvgi=v5HU0d EU/(8ґU4`lYq2PJ(YQo82IR3YW7fЖ}:'pr t5".x8ab/l/+AA6.hƓ &潮Jq5̑=s7Ǽ{D!-`g>fdFBAURA \FK 'a .>?IōHyTbpIV˼Zul~*kMX/rZnqMl$DǴo {b̨WLZQW٪fbckjR뮹B:H_4??X\,6}+=t9PlwڜE U)Oqu3d\:9! 
sԈN !:J;AƑ(,V2;Yd۸--(qNMiH4}a]J:\2F1'I2eǞ9}LW؂qBҰǢ ;HM`&OPfA,cPn{w V ׊ݗOV.WA_c0 v 1!zEI0m³6[AtISWiR?ىQɯ3]zIםRQxf 4 'ӫeD~}&;Ucqc#zHQEx0{ I|L@7mj3Jj``x\;HxeK5m j,ؠPc~IB2;g]|j % h4}<[ [B #nZo,DGe6p{>xu4HM^ &" W̩I,gY)iT\v3IS!M\a4X\ɲ.nY=:O'WC(Q+ULEFTI]V(OXxo[_G.d+:gX@!ґ,nr+$iyIjMs# rLS:qf/C3Ą[BZAPq;m/z\8^8ip} $Ho2rv bƔLVwhe\>p,pL.8.۬=N!f+s+l#3,*1)=p_gw7j7e]\f&[:i`WըM Y5v5oݗy0NW&)XR "."JY:8X ,'ݫo:VlK>`4U!A'QT 833[G\]^ 1VΝw\Z,DWgv,;Av|]NX07S*b` FrwΧ>{^]v`h VT10B3!1YrSޞq^d xp!"$zȺIw>g&Y~`t.iD~!?hFs(R9.P*y^!cAB IN|B&gb gB 0K:fd3Q"TxI( t G@'<}sl_0c¤T/vKj^]7Dut:.:^[nSd5zfT'6jsBmPh>V2bh:)qtc^QOYDJDVR_Y@AVtVH4F;Yʇ'Y+}$+jXeb,,k(+X>u{4ll1)bfYâf#mRgܡ{ƣ6؂1F٣/jBؿIH[XAI0D.41jaIbHnյu4; aڼ1`xscpጔ^*e|7,lq6O[,R M!;iqzT!öH_)"Hg IF:|%Ei:X̮wIG4dvyYDn"~!: ~B  -k!ORB:#[jA3 ( f-m:#̝֚1rЄJNp9bF6Ţ}gĢv 8Diת𠺪櫻x OۻỊTVDViT2I15XFzYoץ~6puvNeJP:EQޛ(3y)ʆ2D c=|,!IiUnr2à< ]]<(crţ&{HlŊWi+apҋj+ezQ&rfQI€vyČ8yDuC-(A )RL d7W>?7W{&r7wuqpsjWC%C0,V߾e0Ϊg[}H#d8WR_L,%3tk%*-GT,$ 敲Ȥ4z%r&Uz54_@7z  ޡ`ApgRVZF\NN}je:9>C]YBYdiC@FbSؒ;t9j!sSP*![:kj+^CD'MJp2pq=G [,h/`-"b^}`U9s:@SX#uc9~].[lD׎! bh#a!rbP/C@n FXot$>$6z>C =?Xvٙ®D&rgKaMJM\xq#Õ"RM*;(%F0S*9gj"[)&[SЃQSy}˸ۃʐ(2y~NV37@*d.DDD4䈥ۤ 2pMv )R9D$E }Xq@6R(".?fֿ̓y\Wq{ws*xe=LR-Wa1鼺V=TUق0g_ \Y(I/R"V="_6*zBS U!V@AF2PJ%MJ^+o$g-h WC& "Ie&IQEN4$"݄A:#朄wN:Be=؄.[Dx&ǭAÀ "ߤLcG 4r^WC Q\lU"Wt<+BD}deb QDj^a@Vg4 y}?)-GzQA1Ž^{-tp^z]0<$}a1t!'0ɒE<\j0=hX1SLJA5a)uΫ&=J?O5] ݟCO5~O=/>_kSOqĨYxCd:U&&2o2d:k&T0$V“^%D#Z~sI:ǟn/O8^?{WƱl{F}!nXJbDt%%u`j3Hd˂EpIa@ gaWuUS]֛T4f(s6.`iU30w&~)0\1`)!U?[йI 8U݉&{ؼӹW p/_aWFQϝEYoo^:C?oNO__OGoWw_ًϻչ)#d_Si:7+Uw"=K!06Rθk7L"k^ o?N2B?n6nj[^~b.~6a5^xTcʖwi2Qj6tׯݛJ=Me'ɬjߕݖ-I6|p彿.k?^3z-)em  H&W>̓$J9??9K.rz_4u8 ss*|98ݙݓɝ]Z8]4^u){KRto=|L'$9.\py]hR'WI ME|Ղ.!B27i9qܴ7༾>0{S^?S6-՛WeܽLgo_:vy9t#Wxy' m_u6\}u>]B{q `t(~9O=x]kzH {Ń<Խ[ߦj̥'n CCrIw+O>֛eHto&ВKz%k;mtZxr$ર8 AC+ 6; yf90RfU #+DiVh0mBS=h03hpqhp8D@8)&ZhbpLt5Pr}dZp%(S7MpN}݆>(wJ܉2Y_\p\FD~r避8Yu%u|wx(&- [|1(MQ-{ Јhڎ9w;4GaX*u`bȗKGzKv\UR4(\\QIs_d.@&ϏO߼yxv|Dwzhk+~04n]Hߕwjͥk[m0!pǡYinzUN|+D#_&`iZs_{YC7—J ,lO;t?:8 i>ߗ:!u;&*QV1CX ԛҫg3/bX`/3kPi#9igE!IUPƑaqՇ#m#*"#ň@*IŖ,Q3V&rΑcW,gmLYtw/H/H}U;N+Y)N[o+Ֆъ\ H/OXa_'@9.bX^(lo"q`R@WG\x ba1"f=F^Iq"MiK(s>b[F8KY<[GQ 6qr+gh;Xv=M~%YE$Pr5&?U[M^ G x )N@&ne9;?dJ+lSN#{B^`XYVa}QVBsֹ( Z c!%l((NDD)f6֒,X0%5 H2Z =hB(] Pm%h%2) }i"*xN1U2Hn ZsR= LQ<9|9kׇa*ӮTڜ Gi@Ji#0]&;qheC5]n.~h8Ƅ`~*ctV_:esuzѥxy gJG rp.="B*bmB$z1[ojK(H,r6#$^\0#s9\KeqTf0:ucTS+_=:T/_8_2[mJUJ}boSZt+WWTޣHj%6m;t1A[%G!5Y`<0 $8o%2j'(*InrR*]l2vo?t:fzm.ގuȢR苷5'[Xx m/p-| 4= ?U= $L|UOZ=FFQ:- DSi%q+2/ S 2 T9| Ǡ՚\GKvy7m"-7Lp3wqwJ)nzЫH>ŝ U=|7.l>]fO>d83O&'aREm?v" @Į\. `03HЍ`?r!`? 
?U= 4?YG֗jVbY V(ݣ}reUVRlZ @/`gD0""x>Eb:fS"Lށ;y&mO#"^hEVlS-.ъъX\:XRK-uSQ15KQ0$pa!%!^KXEuH"~~{zv(D"KAU+͡S7ގ$R/DA.LDE0H)i y)IbHP vYɤ6m"Τ6Lj3ͤvqvLDVGˣwh_09C((X"R0'hbc)GVi@EGu+QERo ]l)d"z811㬈тvqd>(W  Ô Zl2 h~$/B (3̀2Z(zO&HtXAuO F"P ث`B` ɸYJ* 4ؑSNM{SE۳I.^C&z=jW&!電s5í%v#̅GXF9%:e8|'4eu(-ʓH"3 ~_g&`f f&d;-,mͿ)j 3#Y%tAmX9VPZPK ztS}Z> jwT `3]Ϊ vkDE=RT}jWcV2# KUP}B+ZHȵt ֡(KI'GkۍH`z)@wak~XD^%%`}O6ElɪnvcS!MfUD8'*2F?= XzF=VQXPpI ?֢O3l#&b[( VNk֢G#Em2TД|Esvո0qvR;r(#ĘZ\S(٘,:bRtlָlB1~wGmmHgQ>jGmmikdil*I˫c`jbcLGdWsxxe~v_y1_\{F܏Yw!o/_|4&Oߨɟb^g!<^ۄ!6jӟB g+;q[.._|vTb6h  3 z?^UGH׃Y'@#t"ɥ|%r\Spo^ s\֢FB:si/D4[oT 5mD!]D:sF;??D]f|fiJʦpxWX-O/rq_J+MtH fNBstB:m^5ɇٖS[o0][@+[-\'_O.-n!}(l{' ,ڳ7 R |-X.iOqyr2>*[_|c{XHzfDБ2t )UL v=3.lIFMGݑz.=[sp3&`xB:q(8k6Ѧq"s?N٨1oVWB .#dIvvZc4U-տO^_D}z,k<n^Cݖ9chkeʥVwurU3 8l\,S0Ńh eU *K!g"vB0p|{lc 7{V/\!^*5!=h?BC(YMM9(!DUQFTlYUD,bmT*M?Ͽbx[Xer;w q#.Ly~w| >耵A)d_ Ju]MR:ͪk ؀T6+ SHjI{czՖw[!8bf{z h'GCCԆW~*kɦxC, %HРC$*@" Uۚ VE: r^0B)4UHF!{5S HTIm+ĈX=b6Vy.Pҫs`7@#Z)C-$9䆤LThbdp)\{SSdc/Qh<48{s"ev1 s9=jx)`??7oi 2k'e* 5ʱ8q{o6H"` T];)6G0Bjۍd+I?O~Y=8<{ }޶UkY=ӋIU7_~uynV/IPB?|VLüwox??ŝx3J/ }E;z}zaƕ:F^xm4/:,>k<@k,|}:ߜ`nx%U6zi$WtؠIMn `]3`n*ǐIK=2-ȼtdj20o SMT2a @.,e2qeL2"X̏!1amLSdn&oV3 v\²^;ƥA&\Z/qs#"饢Ӭ ,ra6./;6h`bc9lK~p9_ҏa/ˠ<;D݁L`#=DLkVvXB0-F3% ~sPidQTzp{ҽ:7TJG}0|ɪ {K0 \C__@op/_<ş;9M]]}tC _t.}ub2 ad)X ၼ g姷muc_|h&Pιm&;qՓ)'^7WӾlj&O DoΎW[2&/񤤗.ݽl*|sR׋8o/Oo<"h׳,q)߯v,/~ر~ر~ر~xW,}K%UvuN,Ylm^;MМA6m/Ungì)qRŖ0'zꁴV+xilUu!f[EV̥rLн1e*[yIB3rNN7Ҙ[gVu'TxzPWDċ6a<l$@-_uȠ[xbyҲP=;Uayof,j Ձ]~i94LL۲5qY׌=Ho yd}^<s}Ⱦи=&nx 3gMVXXx_HTNABMPzI@#Е0*ӷG4`%0qֲu[yKٵ:2/阕+EߨֻKtDٶT@of#|{oX X{3^$$Tqڗ!" *kdBJ*˹6&&MIXNJ/bBkʽdNJdxͿ] K\u`H՚u0Tˑ='!tM& n)4\Z=96"&cE" 9}K7_V{q}.t+s\ihO&^,k*] (|Dʽc [h\ߝs#3RԷa sX~L1xk &ÒKfn~ٷx̀V!tFE{u’]KTX{Ikn)iHiby[mc# k6;1a Í|4ǿNvC6zCk{;J *.U:*ZL/k[M PbaڣiINTrovR i^D)yc|OXJ&ZwCi#up J9n L?z@(^\pU筚LȢI[1Z@@a59iuO(٩'ԓr*KZW;6(8.iyG7 F$Kfa2J#:A]ydKuYf`v*!K _payޱAB7mKVA~` nZO%#c =B0ΎS'f.NっI]Lߙ`s`I2 Hgһvyt Rd&d  ^rZW[b~l UWCdI,t;Ҝk2|SqnWpnGN [ťp~zr. ZuJ qjR.I1Pt2)rs]Lx]AD urRL B-7{g#~FwJ<"c*_}VJT2TISyl7Չv :o*FĪV57c7=<S<0@@!c%#TA1.MIiR.-{7Kq7ŏV.,Jk(9lp sT<ȓ*{²%wf'hW'ZQyW;:h?)F).=s =ˀ$I`޷ò'G=[O>~ `kmH98#"A HrǛKFOOŘiO5uQC&9Y65SUuUuu]|Rs"CLVK)V%peeTaE%xi5kI00=]Ը*-]J'DmnaLÑۭA9$U,u-^B摠1_ފ3 V x+RZb@dg~@͖ŭnmu POku}_>4LmwJ8o1y y^;e`*y^Di/o#Y $"SLH"> Qm-}(HȧbZ`KJ .,_ه\Z]sT8 |>e~" |0D"҇R" %<8֫rߚQmuڇ 0[eԯRJ:чvrٶvr>#XF%([I1gsG5VQv)V&N>Iͫ#e pp{! n x/[P@kU_(}cOMPRxoeVEC9RY9l0̪ \KԿ@ 6GHj =f9CXe|@B#Z_$?sărp јlkOziss@,3^w/+XÄTQr,_1L ķVTVtKz@={ 7J(.E>cCEBܥB(F%oY)Y{{ :c&Rq1޲KV۔K(bSk󅏺5*_ɨ}vw@ɜӤUU3$'‡:F7\+c/ay|1Ɍ]'rqԲSkyn[ϫò/FOn,s3,hv߬ތs/j~q~v B(he矸_;z7K ^2˓t Ⱥ8u^| ~y3~(8 Ś3~7c qRɄZ_!Gr0 Tj 㑓q?'935yD>B3uKW2ว5oRee1}L $?JuI0#գFhEQ$y_i+S;>3'5XĮ^Wb=j6XƯƉ•~p{F $Ho]G+9Ҿ"+-􎬰4,7.n0'}b+|} Gb՛Kǯf"="xbCd۷{֜g+DF@anrI.x>"\nG6܏@yc!UT_G{@< ->v^Gbv5&O@j|Cd="%?&O˫yr1# ‰hb]b͙][y.d1 S Ԫt+S//CY<ٮA~w}>ue!NS%,:a6`&)hWpr> ۅUa[ȳFu4JNJ%Qc/~׸,3[q،Hp:0Re2k5Jlkz$NPwg֓mBw[&F|P.$iy -dm ͸DUqq(%WDXd:1 \SkHe0>i/Zs{MRxaA|٥Ȉ{eN<>YDdpRi:م;'|0m<*1?AA[Z*)d[sy=3xbSDwG̟7H3֥X_G[uxATUw\k=.h-tbI+V%B|q|!b2K(O3E8 eJ)Kisk㎌~`930Tzޯ@[,@uHFM_t1z#1QY>Fn6Z9Z]KLVIX ,B[ vmf I OүsTg~$xʍ6&ŚL*e")e8֒smqpPs%r^q&:<}F<{ xkײ'c1!R!euh䴐,CLfcxLW־=F 4fJtY pRϻo6*8!wҚ+(9ċbFqKYui05[q،s:jXNI5n=(~Ir 4fž:D} *Xp!h @Bb۴F.3),O>>52M{&]"AH+A)"xGn!8DX J8IQ%45Phepɂ> y^}؞׈E?#G  v!jٓInfv{4ƼGskҊ&:aB&Hg:ͱKo'|v9[fun'K -Cb׳]lY_=),,P%g\*hB3q L2Xd_żeCyϗ[>roy+xa^g]?~HL=K\cݧPCV;Za G+B`~|{ML f$ua$Ʀqh&Zx^v@X+)HqE>#YY+谾%+40 ~Tsbԇ`۫E~tƎ@]#SNy'z#Ti:Z̉-8$05mA063DhՔҞa{@ā,W0AnR#DN$~Tcݣ"dp"LJm5%rW)h[J XV: +#Z -. 9L: qk9Cq7CD mNzG6K]? _`Yךe uwyWvs~o`or=@~<br\7r(&3za:0]aqyGdM7C'9IOZEe!) 
`KB}`<bo5Q#VKCް][JЦ]EK#ވf5 X|}QwZw/ڪ^q@[ ]x 4 #Q#nіDsuN Bv&#)`v0N&WIg=$@[>vE @^"RoU`\,oiew[!C3i{UY˧WtHbJyPDbpe =;?,;\:p4P뱏!eQ3_y#/%AEBiCي*fXߎ3T܇b9FHHR n2f=tT ė3L!H[x#%,X1'P/ ;6C!Ӷ52Q.x2B'CCCiJY+l3"pJMb?Ȩ4K4dU;8 [=7- !NmV&E#vgR3"!pk #6urَSRډ'e:L+ %G{ Aas5l/8[W#pUQē ^As,M(Qm^h?eb[}-0!X8-T9sЀ^Ru?8kKw,ULN $'3a!jމ3yit4N(r9"sbaJӈIz;9 s!ܼ!ڸ (՞XQOWēr*}'h؅8!JhO! G8<'E˧!KIӹʢ!Om(E+"WIRt1.0u&K!)0M$wT,3p&X#; r8c\ki)f6pvx;$(+#DɖbNe}uzye!R!eu㻟}oc$0D~rYFL|'י p0[_y1NNJQTiNA2qti-q s"r 3t1Y\kY|?}@I #e0$EyGC)! !Ŋ;iBੴ#*bVfi ?MGiSDi# hSBiCMh!A ?]K]kοX_G32坆;wUhk]o#WIlټa7;X 3;_&X$±=IaGy%<7r~휦7vqZ,(E4mO p{aǢb˺| @bRZaUY8!ʂ9͋RIW`CKAWh&;0 LQ ߙ4_yo>yA@ l~sڎSs&QۍԹr{lEA> ۻ`ϸXU.og2h7Y3gDgFFEPBeduu;zjAÒ dәf?LK9Zs0jaUkVs,Q2"X(S>@zJ[Z ^`+AmtFLM'4qRH5V\3w֡o-4'}Aw=Lk5˟R/;ZvwP-j%dd-Z`?e^I}߀rJAPp 5'ǞOĽ*5ä(T^H<`ǽx γj_G[,,v"!I$E 8LQmx <)d%Br|".>0 SEn- *xmaxY0Te7,vg6ɂG3FA$ n GLK)`B5VwRݟ B ~gǁH-*x{%q&?1E% 1w@H~W9`5Ϙӻ!PEdE6{TLޣ7b_ЭסC"%oMVoC@̟/}X1Ŵ@% \ H"pF#wD9r;: QqR+=Xޙ(FHeα^BApy|an !ZEHJl2Wz.4Nؑ㑒(4H~+ǣdlZRwF!ԤrwZsJd6>L95=m}`-4]g=6jgEdå%=N7.>5MF,8 " *$0`g ~_7)h$gjGnf__.;U18-u'0vHaWem2&m(o?Lpb$**D@H=ZRst'wvmnE3ZΊrz4|oD"S~'c>-H#5y֨cu|ăEt@״&cNTǂg<-tg(ǜs) @l[OD wcVH]J\kI ʥTf~ zkF:l&R9 \8S0q4cwƨ|-.g107,F ]k`qL<ͦ8#>aӸEj`UyP蕶jSf;ul@*Qu6۰gU慳sjj'S5~rw#5 F\w@$KI~<-8v=ѭ!N!s x%j79D\%ze0Y˿v6UWA]^7.y9FpX( `dBA<.*=}jO,;Xŀ>[mo?jnr /ltIG:MsJ٢XinZuI>߿ο 3%;|E<Ĥ5OԎOPQFwa%v2,Ffgy f\Q 9Igk'񢥎vMp\_1e%_oZ`v޳ɅX+R@BXEkINHQ+40*u[?!,ڟNGC=&*ֻw0|?_k53@>}J>??o^780U-Q T5'6m׍=[\"** \I l [l鹶^"X:7%'5p)*v~28h"rWBMg{8@tIz^C 1NY^@E{i:զ>; QbEyHq2.%oYS?zL߸i +q<8σ<8σ` ZLÖ3Fkk%ǎkA k, BƦ5il:\ӽLL@>6%B+Y( %.A]u`Avp_LQuTo8^fBڋu+t۵zogY5m]*nf QVɲU{qrpҕS43%6u^`zq+D%#^RA"NQ_(QКӶsz#%KDP@{/ESzBQ[xA5o'#-o=#%-%sObKXg^JY F7ah{1|MW- {L3U208szUj08 l4yht$5SfHf^" U:$Ӄ4r:' [KahR\]ٟfLw-$474d"3[vNJVk#9װPXTHݝTV%m椴{4ӕVȪu(|`Eɦ V8 ֞P'%ԃ#";Mt^>2!e=C $Zx/fRﮢ0~ h~Kbmtٗ|t'`޵0m+IǔOs}ԭ=NNx@Rrz$Jl%9fH[`"y0 +S1'.b> (SqlxZtJGz¤'6m6 'ꊌ7XO-'?u}BRe3ja[]:{lRUD%I:%,ؒg<5m+4*3míƻ}Kl{l~J)!J R\UV_Z.KM&-F`Z!5sʒ}4b|]UsB+(= ml!cM= ٞňs_S;hA}Kq̮T\֤i ρea~莭sc(B:3~Ew7v#MSbYaFy9 7t.AZHFAvgX]#%5#YրdFKB(*|kV֟ʢ"̩?h>1OjYt5*,Z{:*]~Ũ׃Fe`0*MG`Ll*H#kg+ภRk͏N@ -)S&vGy]1u*ȁKd1cR;c% vZA\Xn#x$j²jX:YXzEqK6oZIZ.6S'Ŕau7GVcL״D2?,H-mpH爐{s4$ ׳j9!OХ9K!pΌHg%M{S\J+m|ГR*Y')L PvȅaHhKyh7s=e g7OC'TJ֞k}ŒzfF6LK@\'ك@b!8qR/^D:\ D#5x*/).oJ)]8$% КiXG|};%GQUgؾY}: HQ. fedjۨ7ԱN& tFM 1hsk:fӵQyI;(_N_ *:u1VWBuFhxQAUX~S5kR !De*@A_D7le#Pvі=@7L}0h .d"i黇R%SGikk9#Ҍq*M<D#dK};TDGSNZ{.҅NG\9kgNܢJJ弐Ob*Y/MVKd,f/.+0f\c3E ,Yv:?BBa/{UUAKvW \p"7rnuʱm}h6ڷ/r]TGr*[:]t6\= HbZ[OǞ ڕR u#\" op wpRDN6)r\6mkU'Ǭ|Ӕ-ωҟqM޵jj\#anDa[Ah~"'ZcyѰka4J.ffIo2]ٯ DN F^e Re?;w쿏S6l3;kMq^v:iM>.a hS`v[YLߏ\~o}W^GiЋ4C :PϘ?>oj~l-YIGq|~zΫ(nxmNN9qw gU )CkFQZӨ NdxF-A~nul9k>tΛןۏoL91 M [z*1FbTu 㧤'nͥJ!Spx[vɼ{ܴBIAZ\ ۭŭK/%@] U _=4]hIt'0Efd(|&|kf3 0y⬛>m^H 5^̝}9DQ<=3nu|z7y]v Ө7t60$ٻzoc[adg.aT'c̩~7<$͝yErzęCNT .:*:*:*D%?ꨤJ)D%dE؄!I(-8v$D D"6m'xeށ٧S$,F w9wΧ%g!zgt;#~rxD^g o2/7{הkb";w04v? AkϿ{n31hx?L en8xð*g[:=p\<g\g{VRe}.[&v6X}o^WZ-0(%C^IX_ M3A[ Ӎ9e$|{}QWzLeOcmvvI[9s=zFf3GnȈOkjsq4%]s@i¨Rʨ\<z@OyC/.8t+ ;VZu ;l 0{zNUPB34ާ{KUp =iEV.̦wnG6ZN{Pm&6XMxc59e٣,x7!ӄ~'Csfʦ3.ܰ;/Ӣ[iCTLA QvJܠXUc_؞Ү`v:;{q*yyFF^I}작/a2 dbTA`ڿJz+kjfy_-3M7ƶ: \נg6qV le0k}amH)5Q<^-]eũO/Z^2}F k3caWRĬVE\-+֔]Ԓ#xHX A* սI>tl. Тf˳%(|dÉ^?5wI"͸^k(qtWai!8Rk~ l{oS&T6 %9I66 7o{A NAQa~3k >-jMMDL‘qAמ?bgBVpq4Tg'~Ynr8 Sz\!eU|US41E6?՝ϐtϻm.A>)z<.SRf>R6=BC+g*NDlkbV){4YǴ"@/{JhFY2,zL=K%X8k%Sϒ%MG_0WjIp|&+[zµCxj$uh̬F J(teQety+6r#"CZ$ ̇`dlA0:J`%^vs49le?]/tp>S)}\V>;?^0zZ"w(^]WlT2 ]޿k.&DrC!7h TcQS|FsÆr. +&f9Bu9~ |5?tpD+Gf}Jڜ:v@vit7rȥju}7~1BH-ۻU&Q*ϝ! 
6TɔnXTs?u7q(rIyADF[7.Pu_oJS< 7κ?\Rqj9- ?\TZfJ\6c8n$Y,yǗ%lhYRI_vZVېWjUk{ I 6{m.Mt}țQY ~0rY9>DPy-[qTd?t0쾏vptޡ58ۘÏwU/ͽ2(6]CuWŗkUUYFGXtz6 eork|YA1=\J[3lDyx{ҙ__Hf(+Dgf'cJ7[튊. l3*Vf 3^=*gIJ\QK|0Ω)RCĴR?[~rw4bS|4?]wQNz7ug0fx-ٿ7+IySe V,2bw1)[VW{ݷ 4yy>r>}ӻcrӧ~>mp{4w7򫪌|޿ 'oҏ_zȷ`'T/ /~]8*C7QF|Կ5hްg \5HFt3.IGFk)p\ W K|8#r|ONc|Pit:9rGwh݆}kwvF8{e3!R4% [Wxrh~q QHBD- jLV$(.s`]r@S$,BTܢeC$X+M%BBhhB!&/rcҥOĴ8gEgEgEgE}-W=Jb `k@PK$(NzGlQ0փhes8m0rF0p~3_sk5&=0|7y~Ѽzf.s!K˘_U UqV$5颖(֬|Zh Jp6:_C} xލzEM|F[*C:1܋^p`+s^7j-~  AC޶ pCWEA[m0ԌR S9f㼳f>㼳Fȅgyf)jMδڻaf捛[t \@ó0j^#֝|ow6Γ\T`oI7NZh+{=5G݊Zi<]HSh!2岒SST웑+._qNy% Hk G-7v$OIq!Ǚf~CHT`9xݿT}19Di_o<`aN܁)Lm Ǟv C-baSeWkHTANqz$SpkMh}x3\F.9@+9 I.Z^Lu..sIdLok(RhiCѐkJU֥u@DdCc-AM MYnLS%chYЗc"l ۜQ4Q&ѓmA"UňI_zW>wEÞAȷyNW Z[9)VHߐC? Y22Z묵[HyrR}@>GA(Ax MVܘ ^q g"A"/ [S0u\1Nqѕx0L:"IFMqq0 1i X8\jYpAUB8 -3~8NpKQ[3 PGr"$.)Xϝ oPFvbbh&](e#q`P@ƀŘ$ P,O\syHBJAMڣG\jN(5[^㬊hʬmA\Uu*1lq؛-"8o*]|g*NY饥Z#:)JjjZ"t 2,QV`ps8XHm z*'|Xտ©}?_j'7KcT֝pWG?mrqoDWTN D1,x)?ʅ%׷oaPZ6:Wu~~`\*wU] J*"yR]pSQGeT`?Ρ6mq![RɘtQcN.S8I]RŻ]ʰ\$p@-FRu6GeѤ.k*h:FG)eqn8uQkF1TXW]ԩ'B9R-6/>Kfr0\5Mn l5愍vѱ|i,lKpt]h&6IQTY,Tk E^>3¦ rz~ƖMLJmj$EٞY6@rtNc @ ID-- #<&hIQ(R{pIa2^;-]u Cz8fepB[$j(lRc`O(c4%4b][!fxInz4*Fcg"SӄhrO: JQp!p|;@îrs:IF^u wzuowt^$6lcIB D!хVDUAOM( .lLg@E*c01h<0 CYd‡.QY <½ d-.Kkij諀PQl%yZ77l&]E܎2f)BIJ13cr}#Uw Jђ{S9fNqt#`VBQɴhШڂ RXͼnA#s4Dt̞ЋEYʸ0  /:Dc+UMc$;>[ 2E([q@RHEÙӍIɨͬ.sd2_uł{#ia89D= g:Q!3$#0t(ǮV@6nCl5gyXǚ7zm^wz4)3lݻMΛM68(Nm !|!^y!TD |'8  шrMJ+(2—8(ԄK{ kСyW8.cu8BÖwpkߚČf[R{{,u ֏Dz`X+X+'ŊZ" P»snwj6 8wFIø4xKLKI `Hbu 3 >p;\?AD޵7GĘ(<JU/IEh l,tM5!|'>rΚI RP;H>Fb(B ?tioVSSRs4 L͝Y렺vA!*ci=<,:<SwZ4h6*߻y35W=k.y2_γy2oNJZƮ3"׵gr$+ ENj!yMYa{yAp{gͭ2dk&\Qn>m[60zC›*a @"垊 $-}}E Rq-f-,P1AkHz<+I7`M5$. iI)|{RH̸܇B-\MDxރR흐gGgwǟOtsfq?{vd)'hh /;Y0r0,;ri'q|~gqjR*PܡQGiRaWxNjfDR4§wzAbT-Rc%V=-Lo \W$[svOE^ io8rzMfԧ8Ǥ??H*IA y.qOX2ϑ-.X/"$tSǚлɇqIq`oDl7.<^W. {|nRCC7 d_I耄 L"B |17 F9ms[9Mar(nC]7<+`cK\7SsGhGׁJ"RI@\Curqjz'g O]`BP[dY:8:HK-%w-F䪵 3Ğ}V #_<@)G,|+kFfzi2'Iv٘hHtì0C2~f&WqGyeZ m[O%I)ě/zsӁ]|y}*'kѣ|6#Y8 ;J8r[+I"o3xpvH #Hr_s޽}\=B:Dbg8.?:nΠ#t)8u~ARyIsS!y2'ܯ4’JM;@kiģ,]P&M;7CnCMn)PhWSU!;LH<-Ҙ{z;37: @6N@Šg/ zd_OYu_WN֝z9d˿qCT Kqg.|:<SI A]/ %,BB0IK?Gv+L2KyK"3r_"Z0={ȟ]*y 8 {n1 c= r_(r#)$D* B{c( GX4Jz J~ۅ k.ny!޺Sg-f,z@>>Ѯf> W(O1~9)*"y)4!5a1>~ѐ0m/CP0^ӥ,i(՘L.0V DvK=?%G 5R ("t #zTj '+aiWz7F _U[UJ%ZqցQsUy!6*ZnJJΧTBP*E_b U ;o<<„T^3ퟏLPRKOJ W0h t,#ɣݕc8돡%\K6G5py6i!&M;0ƤKoM oO V9j+O׊6 6>ךgd?*8 N\{p]>gؔnp:6u!{'9:=8O18Qr]OmSqZYAUUIV|AZucA߃Q %ml3Q+a<o *DZLJ* )u8d9Ì>nR?<]WAǓɵ| 3NǓhYj]c=VX77XIK.gs\jSI рYuX7,lT:l.6nt)5ӹJQ~AΪooWޥ)"S$y[E$ 虣ıV@3OIx?(w̗(rGEώ>tu?-aG| AN8I #{(κm$\=dӻAϳ7iɝsXރn1 u<>ϓ˴8stwoǧkx %_'~sjSūӷ_߼>{{zv翿<ɂTx΍w/~ MO[{/z3Ԙvo?O81W*{T[Ĥ_r}mXrM[vi0896׃M79΋Ť{]~0y Ge.|{e# 4$ʹ v5%vD+_Y7 u#O3@ѓz0$ٔJ(?o $2S&!>ox3N$t^ɴ`L*gΚlPEKxi͵-<;5~߽zG& [>[Q;6b{a?nE@I퀥*}3w{mֵO_:~^w| "\@e<y=ʝwc' ڮixj88O9Onq~7ify?π/3۷|*~*aoSl'CVmK r׳;q֐-@/aZ g4 ;$D΋lbR:Z.@C}I7Ի*<]h 2>hsHB.jTAB:iB\=_XMTa rDPK8hW jm#(v233zl[.4Z";\4 R)"؈ Ejx31sMC»RGѻxޅGѻ]ֽ V™H a)tp ėPo/w .fkEΜ|b㛇/ āl"גsJrPS- 1 2ti-rS ~IݠcKrU2Jrr=GȧLHAQV&b@#ߜpMaݏjxd͝^A@ΦcL\ɣqP ^R"@$Z@B&gMޟLIڠ=IDŽ st@$Պ2kRF+Y0BɤVܣZkTkJ oE2mf 0eE.K$E1(\Eq8dt@%8>W ߛ-oȍgMMs}9T?'cay(_uM*DgIMݓ壟$ y|IGK~\O|_^ 6i )nX˩(RF %BR"FUT6p4"Rs¶lT;*W;.Yu\""r|hJ`$FL#5bZJjvQU|~ROIͺe>#eAw,-=A1):0Υ`!Zp]/K R43C(Qk$Rf!Lv]FQ(bҎMhmx/GCmֻQX- d} :eLԢ3Oa,z=@F (ϐ-P&׊aZӖȁ*m3Nd:-uZ4ӜkiHdmf^F"}@C Wpz垩X S@ AXCMF:(,z؃{;iq-'"5JEp%g:FJj\ *D(JuAJzd--3naԊQpC\Qg*5DeIT&(rS"3,ZTh8;Tڌ6/Gd`fRR)݅D#CqeUƤe}r\Mں%jX8JR#VB%ױ%3z j fS3 XHekuyhU綣57WpO͘J3/؋'T%q ughTkoߥcwOOJ2q[u|tTnfwRE}BT\lcc)5/hj狿/.zҦpU5X(ꀋ%?\f8Vz!@Xp"Ӟ:7^DuP3s3JsrD[m M4`w5`#A"Q)D t,;&i.&fI]͠56@%IRߙV /n`&Aح%sդsUu+vIԂŧ:.(ORQ`PP5 c[uչ.UUA)q"גQ-I! 
'nnmT Pف^躷'U3y{֘3nNS<2"5" Zَ;q,%A߄(e/{9{e˽b~Ҷ2tV#]{= T`y1 y>ܘHUqag8E9flq17kר^gE->gOOV1Ʌ-=*s|Ui}|7-YO^8cJ1x/q"5Y8J&!."Dw}?#i+9JT*O/^>3JEWR|܇~0(VO PĢ)P]±.^]ϋ¹uL%zdISG2 )0d #':EPU<*'xYSWx)TINd'.U8<*Ӌ5Kw~UߴybyK֫_IK7w݈7uu 7 iKU#>fDͳU^W9ַK{p봤 ] \֤xZFwWTn|| b=;w ~?~_U}RW_>"aw{շ~ _O|LR#u_pOWϖ$|?;_8^2a~ʿ~S\} ~2͏^_W\L)GQ-&q}p{ 9  jU]xƪ`jyoL9pUh}*fYXo~ANo~#RS < խJV UP_fWP^% Z%ͺ9rMP;(0ejvY57kC͟'pTjt,' ͎_kT?II>Q3$bd=8ҡv3okU#9Z3ljfRDp8O{@yVL9C#DY oY ~CHCԶaRYQ24(X)`0+KGIA!'@J뇔oI/s ]GqዪK+%< EISy @-+DQJ%Ah:@k̑,{ 8+:+j/Gs3d_la7/[agӐ;Ų(娵2N Ęntd *EԋdI}:V*^Q@GvWfnvB|gX@jR!Bxڱ{"cF77Q JWwy =E2]EB>MG[.4VƖF#tI_VSv8͹eRc_ŏh%{ſ2[7B 6El9ު6xĶiZIsj(f~]` ]\-&I941anyL^^igK*}VM6Zc ƞ}ei>w] i\^dYJ9.K1(yCy?Aw ?; (9`?[ :y5)^(T|)ğKт-՛~WtW<*4ޯ9eh^pR Lf U4U:6QUN&.pQ<9AJ=8sgԸ%O p7bFKv֠[R./ U)4%pGuO꼬JH,sOg'̖ڧ.R'TiIIXQ)i_HD[ZN0n^m5K3^.0 !C9:a24 ϸ!T>k%=p9тni?h҃Ұ?fvÈ988CWD[<$Ƿ~gw <ʄ}=_~B-$QN)=S7 ,G͸6Sp0d5=wUp(uPn|< ! `KgF6ѻ7@@,x41s)ґ= TCe-}iVpnPO|EkprPHl!@B!OMb`YEaN_JJF{bP.qJa Xo+JsLxj/EtQ\2}Pfm>TdU-vTiс}O F%1fZPtlv'@ @Ilz[uysHj4YJ]\]N,oLlOO˔猰,Sx /4vj^{wݵ:A ݃+n : `I3Yyw<2!P-UHuO^['F0b3c©>L6P c.x.&NBPcIqI$ bl$Ղsy=9J$W}cB7f~SZQT$*RC(~V:N | PF eR3DV1#3ZUMj$_Egvc1d]V=)lt%+,x4Hc,1 uNe0h XPdhƔ:JuZhIᅱ{4`ERs(jyI3 mm*D58hZskuȘ]5dS#8/dU2s0 kY0ƆLXᜡ)zշG vpA8an8EEY7wJ2)+dL:}J쵲K$ŀ:֫l\:7$5.}*ƊrBod3@-l6)2fhnLN*ntqJt+xE Mz0q$ݝ2.X-)*q 2@zP-THjQjZؗdvH.Pmohh47dSN9 }K~^`0/T R'L# reID*=?,5O\i+ײ$e`2,m|eKXi{Q69n鿢/\Z6w*u;_" pGɌf,i^|fzVRe{w8dt74hwQp{YZX}mQ° ]0#; ;˙ >e(rwW drnYrd?,10V"eU Lְ'ԋ(fMc/#A8KD##֠F Fd!ˁfOLت (<2xu%2V3f7,uQHP&@F˜ӾDt];"1F.BdЦ2vCs'98a`ڼ9dҧDx Q F?9Ƀ]9=%'쾏a艾Fraa?\uC` JA|vV9_c+Y_ɹ/\ ԳOW χR/h9)]2}Xm3ܔ?ct"TF6>]M _)muj~ƭV{M:h tJiLSh|e?4=UI^)V&3t;UR˩*zsLY!_i]$C|ĭJXsB g2\~L5orC ШK,,:d_݋-gq/_4×Jf9? Y"\t$Ƥ% br,}~A&H+/J8DqFpw nѕ@\,T'=4\<ף+*z+Dqn ȗG<_ BF_WYI EPV%-N#ExQJp5LLi:{gJ.IF+ sVS9زȥdZY*L. _-K1fVI|8bHd!ӌ42#i<,V6ʐ C]rFO M޹iZM]ӬY#rJZ Y=ܭ+Tu>C/]g'wTaj|=R"y)Ð ro3VZN0^R΀=wyA_.tF@EKs 0HR_X. Iꒁh_AvG`=7}Ժj&~|Ҩݍ-zu/CV;9 |3eWL"<RZW TP*^ J#\)ņ;Io/%ad,+ r1ʕ,QCas9w̅yEd %\%2 $m|(F-3yU;v4Z"qZ  @uII}@%_\o{Ft2iqj)(Iጨ<;g3ۯ$){{7@||#.A K1@I馉3y@uuA}4SA.  TsKJU:V%Tz0(zHh $VΙ$]Cl0PKvVwdư2ԑ-65/:ADlB~Yy'Q c#@:5qc]dvxu=Ch-5J b,)rB d,?=*0TDK&m8oo3O`oWn;WYt$m̨{ɟ]:ȘN8?=sW1YgR%iÇε?ǧǵ'1xVv<68v?<$%y-鯻 sF;B_<(dN WKW[}ϳEn4ںuAUv"o-|4+ݷ^oqTF/ypg`w=)RDÑ<)]C6ZH_aB7f~f~8cLj38Mf2AM(q@!m8wGy/ɩ̭*߽7?LEi.Z j%%|4t:mui)Ve:p[1‹7u1jJ 뀍@GFD{>c[$y~W3W3̌gmY=`CʯΫQ'%itF1y1 cT I@$z@֗*O$WXv`f[4ø+Pd 'Gv Odu@rTNqb"hL钎, mMl$l5]V̢S4iæ՝8.9{#6%u|}9vqwU̓;[tݷ^F,^J2*prDȅ@raJo_CQR p/H&~u|~(p~Ea]& 0f 6D 98&kF N|>;Kpxn1,6*NU%{G3:q .FO^xˋUk4D5s5E=[z:̴dDP؁RO Ш)iR K 32ՓoȤǵM׳=1+r*ԽUO2o6?xi67o-63 :7ee(,J(.u%4PR򪴴bJA*Z慤q_zxC~}3yX(&t@W ߭oNVWnw~F<\>##9'S,] J.PmPB,*kι!W-ш$7[- (V6w9كBi~ː 4pY D-"* LU*"E\*V9! 
Ītkۼ|Ԩ&WƏ-ƂZT"sJ)ʲT9˅F~ Ab y2QQRPʌ;g &˪Id 79# e^3dhA -=AkSoDկQ8jx6:'%=YNf WtpAz|XNQeYݪj$3@WϷ̄U&8 6rD"0RI}D0H#ƃIW:c klL@Z-OP_Z<<}s^}t!`k>bƁ9P An2+4M42++~x߹ ~ޯ/,ʻ %S S2hQEGKgO7f&ƴ]Z$vv#XjCoZ'ˁo.ә; xJۚ5:q)EC:r4: Óg䳩6\͔݈N+u>6.f,յw/ZcK3\?!{ w\?vph5tQ3SO2Bv{ҳ "Gac߰3$揃n]<թ<㢴dQϜL&1̗ >gvxr\|.>K1> BH `1*8O*1`G2>t̸JF>F^Q K:<;[-7Ytq%½ [1je1ob6w6+ѕE)؇>jU{q81Cp pt|#%piijMZ(?J=5`l?0-6=}9n=gn)f8;mhsos蜡w>r|>~zpn&chsRN"q8mLrSӝ`E\?='sG%JS5흫/?_}_k\_K9N6w&Pb u|\r80 #4{Z>r\04/_On~-ALޯW|HBn޺G'ϔj%Xn ݌zIl'G%w yۦH %͠ +\M*{/2|>]W@cZgo^g"p>3pt3~^} QjD{?=6K1鿹9ÎpU8=u_0/&0=P>JEGQϪj]&(āـLcFP>_ޔԪEb[A0߱5< hl ,n't>lIins`+xz #EÁΛJ*GAq֑XK6ٺPl,4Zn|"mK(\$K, ;J!?:׮Z^m^7 :ǡ`)o*I{e>2'SjalAv4EI/ic36L"pj%CMpZ#Y 1UޑP `Y$G ]ƽiN ?SHې(|ۥX+K7q{="?]ZvKXz-f?s0vM#5Cw ؾijr_+FG$r6+ _ ʊ)1{:4pMQԫPuڀz1K\(հޣt(׺0izwm<Ѩmb%bӱ915D5E,ބJi՞q Rr~oԐaf&GkuZVͳVM6ʂ(MݣNĐזp\ c|4ZAҶX?64H?6u3Wջ&=#{Fc/hK|f&$K 7E,ʷsZ6j$bDMLzΖ9͕AyOqڒ44Sk!4`96$MJ6t" #֝u ]IEAi.Hd>Gf#*NU@^MR"jtx$ʷ56nJWT*D(\fuwHs0JU-UJ܆gXd9>]ϗ񃗆y1JSTi>EuOQ6OQ5 Cr4cM.< [ kX'L\xyc${ "CYTlVJ gCt xn/ڛ$/ėV*bZ2r_5yMn üjDI!pNNM KNhqlKɀH!Z)I2ixYt)i)!) APprF !jHQ+R,-)Ut)ԂPFD'\H&8L " pM߄dIH-W!0K#)IF) 1`)F}ypɉAP5ʲ7-IL(A Y4Dj PEߘF RH5\AR%T<: BTPJnh K_'3rR 1$% C~0P(u?]^&[^mنղӋpa)~a-&dN|O/ՇwfixOfg}8~x;)V_m΅Yw>!%=;ҷ<"eyB~{ß; }JCeV[GȖCW’\3p& ū~?.L]@[H"u)b0VdMBxB>ͥhFyt&F:ĴVkvȔ$AX[aCtw#( a&1$WL4J͑)04黦pRErtD#"1' g 1wI!S[zLDf}˳x{Ss߹(3C4:"VhWij1_1䝚int"v ρb-g)8@EO9L¤5" e$Ac^lu.XâBɃ@X;2+픲R0O!@Lɳ^ Ką@AO Xr+AQ F?B!~4l{)xˁW)u{-CuK(&T'ގ٪: ȟi[ulyaTul\5N:Uİ@thRfI$n0o!gBšk.IdKv( hfFNPЛ#t&)SNtD2( 0m·Ľ-窷F=dV!(طu9&_P'ގJiVQmѢJR`p>@kĽ0KdV}7]j_..)Zʅ=m>ap9a Xڽ4o0w%`/^/{ Az=e؆HKր_% e!Aj7~fD8+7Ehw1gU#djȿ/7ꪣ{Kрk34'Ehw9m!/*_u4CfDp!Ϲ0rKd6!/yGLJAL",渿Ýhtò얻%g~H"('Ko%܉YƝp'+[ew",zx"Ʈ"܉aY#⍘{7F jݝKt-l{7 Q)۟Dwh%~zLItb{,ލH \o- $Ч!cx?Ç/.?/yKvD}$z ׺ eyqBm[!c`tp7)8۶'L;oM =$$;oM f) Iir][oɎ+_v^d< d0ɜu؎cΙYlNڶdU/eȭ.#dUȾiPpz08$bwu&\䍊CꝹ\wqf !hιfT8#$(t>Iku 9GZ;c;[/ŠhgNkkB0w.X / E BASl8 HΓZZdQi4}D ^ CR]@,&Fβˁ>e}գA\K8A jb;s6sHW.V @D91%-hI|p{:uF72/D 5hjȸ3w]fVaS4F.e\=Zt KTnY{Z+]RR{];HJnG˞RkEZ=ԥz%WveCU_aЍ.dsdYa ЙJJC:{|"PMm3ZSL 7J&D Q%# qwuT䋮@> [=d?|I]2d2$H'sfu!;DŽypq==smgHt&roW%o@s y7Kw*qpCJ񕯺noũxoK-f'kGqVO,:GhC<m4-AD-y'"8^ODbeezQˊtEbeYԲCF'] Y0zʧ˿Z-7!>gYPgZ?Yָڙm]*yۖ>-%&0'~FpӬOS;eSfDu6nDX䓯Sڪo$8xLm~.Y^@soo~~4JmUIJ/,tyN+?_ݦ=WI#ʓU)v^8Ҕ$sQ Ygh",x}'5j.|^sa@&IHƓM"c;$m㫵wV4^+KF*a!XU fqe@rA ޶Ɵs0++Yo4ꏋ׏B*;{3`ۑ+C!ݝs(ƾ2;P[*Z40m.֩w<(Z -[su hô1EJRV*xxb-DXw5c[?=0Wrrh.w):ɹ˒n#HD݊O%$*tp2:e鍬Ѽ[1rk $g`Ak,rrJV@A1O3Y p:YQ \yDHTc;baun}4S5D+@KȦߤ@ψzꠕYdAϷi}z^]ggo,]ӓx}s˽ ^'@4鯏73- 8/{]O Dß]:LkROk'}0';OECDm٫dFɩ]r}kcz+Oqv(u.vU3}$Ih%Zd$Ŀp.z5f;hTB'q)WN&FPZa&s9k!衃$۹F#5WL7hm:2I ٔGFt7Xqbͳ>7pBLhåb[k G._.\_qVZ]jȁm&f71hh?8*V-w4 /{+P^Qظj3@|tlx/ʍHKbȅ3Z~SY!󊢇i\P^;lCYŽw~\|wHVxvj*+Hq'ǢNl<aYy;="4Lخ>=|\Lv0,6D%:z鹘%J+(f lB}PH21zPp4Q7 9c**#BP( }m:lń/Ҡ%up{g8]ZBNR3ARJZIyZ | Cؙ[wQ؁FOGU@<4<"- iOb7]'FʉX3/7vljG/e':)X*|g1; 7Fh:DV"Hbv1, (GH,cYtwCgu^:ky8$ăJ9FG,Gf] ?-meD}F QN!3gi%QeMQxNFGoC4!*HߙLkIf2B1jodeIvR7{gtKdQs5Dq0*[kK^H-?PRk6*l}jհW޶NJ)Sqcu.s=B{Wˆ=\ LўԜU*;,OaA1;3 qlmN|MXB^) HzrE+J>Y25 qW,sQY,|ɼC|\ {muґvyYҐu;?Ԯɀ 闏EQ/c7EHi+W!ef8a\nh{ oa>>X%˭[q Ѷ@nWLeQ^XHp%5Q63%4TlZ U:2u(u]3s0tE﫣1eO"Z& ju0ޣ)AA|DsuѯGOV]tN ;ѿ?7JB]&tVErw2ΙI]ڮ:mx4h)˜_ r_$%c|OX0jmw0ڗϔ3Zo>=(bӻ`{4ens+@1ֺ|0JVp5z="phl4_99.{&sQZ}8;= ?+fW)ߗ*֥Jui]t,Uz*L#GY cc4s;4Cb>]L9)r2)1cT*S@/'')= IF_W1'Wǩ1藓I<#+PUP^W[u%i] yZWB6+!79%D6ă*@JSBm2 #J4\>w>w|m}Jc>Ool'}}\ﵛ{pOZwǾpWǯ^ܸ'/jyQwcͼޑmƳR 9LdLND,d ғI` CJJulwP{e>!%K`j^N LnhytF&棃ip1.uŴpь$wΆZ#@@*҄D u1z()::M( uu`Ru>aeZ'L넕i2m&4uڑlcNYfO!]1働)YޅH8B9nR.u鮃 A 7B\-'YYi9/ [RmI%3YJ]P\ ^% Z A[BYcV%J Qyޕq$Bea`wDГ0axg^!OZ2IQd&YUn!TvUqeD$8Vs3~LX) QmRt')ǵ"#Jz^V蔈V$Fe5&t;,MjFHb3-bpɃ<,]rH>t!2ZBHj#&>P0 ڑ[S^hsp$Ȑ13/'eAi 8O[pSq9nC&V\I0sE3O iDrirH= 8P \Ifs 
?ݷF+DޟP+t_W:bۯgzsz|2o -Y-Bqyrfw!&43mWS#Se_xYմ7A-+ JXҐޒ6pd!~3@6FZ$;\9ؐ 2 h>dA"[d$#@тǞjMAbOo6l&ln* M"s@pND(WuLE~%20c(2deh\ ׃fQY*'rJ؃D>$)N)j' :+040)(i# @ Swm;=͇8A%Ys}VN ЧD&\^0B/ FJ}"mSX -܊1K'qyU,eՌߌe~Gb-ECB18vJ{Ót i. E?90A^w3FUTf@^WL{ ō!uxx+s`y$_O֔ %tv_y҆TbHy? v5^=-y<F@q D6?H?Kis_~>0I-ᕭQOd\Y3(|324+곫%YmW?PPD;!bW?p#?XJ[&-ٕsL^iB907.`JĝB(&\u`/~tQmO-U * KsaVJDUK(Oa.Jae']f6]3nUB~%@qt73r͠?H5sO0 ^ܤS Fff{PQ?eAwjA5 5IĤI 2B>%HLj21Y8Ғy()s_b|1エV ĠU  b3B2;k0 ;V螄$K"O_<ؒ*ނYVLy|@Hsw1e-h ]ǓY[ˌg!jiԄV?q7N~#io{rkmɭ86V?P!FynЊJdꜸɤѲUAYǼq\H!X/HNDBCN66eetLX`8z2aI&Ya2Θ`X(H%BJImX#Ȏ$ܽ&xkR !%^ksZ+G-X)ۀ\iApa>ٮʌԚLt|#DJUbE pfPıV0k! Ц[.xsruqY7'q4b wzttru_?J˷o>m~7ZcW6įo?[ $@JXZsOSqWo–(Y: ?" &C\\dKM1/0ݢirOݢiJ0tJ0]7['9:˹lWD#kQޒEflD:xVEm_3@hUlF)QQ%5Di<>5F2;Z8&'57S;&Mޚj ;m[بo&5@n2yGF0$SթI|$4@|> O-u|t= #UŔ69|4 #iPBp3yml=7 ZMvB!q ҂[i#2oVZ6f4EId )qd3Egl=h9ОHM1j"K&k)DNavOTY#p 4Q16YIXɖKE.Mz,f&J@d @%sȠ$+4:F$[Nsyr9d:46" +KHj|gí/Š=ʬ:r<$ScrA8g0[O3ry45AٽY3 D/?.o8|"YH7a”<1 _7Wquh4F`󛏗FTqΑ{|=t !+:  mؼ_秵m |.ׯS kJ>Q߯IR7㣿?B ~>|0A?4rwjFNuaᢺ|!5T3οamNO>t(pS`KX]i+VbDM+6wފ7 jT>q? OM_TR.;^ [w-9fwǴ^H"ZЗEnNB]_ⵃTfꎅnfa Q&[sE郈%̫quQ+ɧK1Hn`dmq^;Hk_clyXUSvp̬K^wX#:tz JFVQ--nȭY 1lsB8PG)'ER 4YmxẆrwwD=LzM&>WU@WOJÛWװ8+I[S fn^<}_z@e.`YsE|Fʱ<| M+!h->}7won>_}d?DY>_I1sة|i̼ vMYd(\jv$ o s=]]NZ{gW ^b 5bA"Ǔ= k3 9ʻjGy=zqbOc*Gy E Ewp)\Zk^07pgzi<<+8𒹤;Bfr~Y408̘o:`5a3Fu2m5W9s̈9QD(94iqVlHN&V=K!Re N~hdh6/XH-۩y˃[5xc;I];ɇ$U+g'Nvhּ偄UĂy;Vʼn?ÉBmʝ ћ-tϛR11\"W)ףWxMقǯz۳/@Qrы"XR TwfH@ $;ufaE;g,z09'8 ngO=b; ɶ.2;ˢh'.Z*v̘4 kٞoVT[K2J]  1<.Jct6Y>[ֺW&йej ^mc&))ptuL,Đ,30﹖l718bk ޼u=,.4LXGcG)LGdo\9clpIS&0'gxĀ#bs g%Èܘun09z,60rr00:ݝp:[$\>fq r isjC|k !#_u>~8?둤9W ޜwg L:~nu>3n:N8Rӝ۳aFhW0ʐD_$|Y WT` 7 @ָ]+[UT_oHҪjj $'1xpk 5s)jCx,.f*2D"CO`TQ̬2ŶcO/VCrXL*e; G:Gpx7W]l^*.%4<[)s-&fnJ8#69~';svx]F wljA.0 bOi<@ilH\No^o$F^9>-.pXH 6N0Ġ(4p׀A7>?iv)v&*4 I^/ mm-C4!!#ˀX2 qȕ2FԐ@#JZ*ړf8J4m`ց }Б&[ \fC&4Ff vIĵ8u@66 AvW!(m諁 wCha  !qlK r.r`9>\ʶ)rl99}D&d}r[G[qt3Qt:t2…d >F]eu1}4ylM~'͠3 Ɲgweyh0|$6 ་`P-svoZosf_,E@.2$P!G? 7w|'>q^l!yw>qZ4`nwS4kvwdӬ0J)Wm*ߴ|ֻ+v%cQ6hzN۝Yӝ} atcx-U_uN-I`{xZ䡜Z2~+ p}99Z߫[Y }%Ӂ mU! 
ϔ+|)KA Nੲ)m L)r{KuF&pxrqMg9{>[g"&`~SOZʚ2rAU ܘ:H㞻r'1VZK<"(Q,8-+i~S kET( kJ ƂκQ.||fg.R=sPk8r_bhe0OlǯZ@)D*ܺ ]<_圹%5KҒ5~G+&La$R91qWBTkv~,.cq~տɚ/΋qt ąd/y򚻥qse7 .g'j4xuUw%JĞZ1`S^ᄄ<[}'Q+f|:v4  ev ^ɩZ]ϯM /-dY\xe@K (%I$@GPL~(ꇋ?~rۋN`Ə>;%Vy%]'w(9ѢSgC-yk/xwιkL'_:ֱ܈.fR o' _D!(p%,ЁF3S?.e2@>֊P<qТI!k;{;RWpԜ.b࡛ARxԻa_D5#ҋ="*^*EbZQb@!P  f6FpZ#&$.eT \guHGJ %Wk"aI6c~ϟd%~\uW(oFѧ*҉M?Wr/)Uj&Ͱ"`HvRRBjMj'RkP[a+[CB[0.w(iCXÙA@9, B,(I/Bqwo/i [efJ|"ׯbYDPn@6^ߚ!5kFO+OBVR{'']Mz(~{ .q7[p(`!BTIH$8ܚ$6-rySzN+QV1촒j/#Z"sy<̹}nh`>R~:nSd n[l{ww>;щ@w['vؕ[rFv<+"zn4y%QX: l2Y$Bz6k)#F\ՁzÑ D7rKf&Q)^jkH3Zez , [ǣqe$gRj)+b8w[ܒy5;kVMAx\q]|Bۯg_hJpE^[lăa3ͱ^j4z+/ U (;xB$aS(TMVxl6_K6hHyVXqBo iTL(_4hOc>c|4Vۤ9Pt}r4 t8[SơԤKp-O&!p{s< G~<0ؽPȁsў9G<=;;9+Svpئc-8ndq`W#8svFę`5qv3WLDuQu hԄq[ݦ'p™.O8݌еv$ Q!#w?5OF.թ0"7㾦 $İ" N>WGN?NǕl\AWѿr)&xu6a|lj=!DR+&8j% k- ٲޖʕLY[)!rTtyRunJlv$Ag٥zw& b7?gI,I (PFDTEP8T$Z㈄!G#Df0 dbt\I؄:aڼ qLavfvBSDCZL< cuɂ.ˡsg%͢ |` ;qg?g;U7v`Lm>WWV~0-87/{wKOۉ􂔿>R*EV k8 nPG""$w]N}|\$ (O4wӴàw`JnҠ$+ &Ks|'Jz'ى˻ :Iq.?3<05#ϋ`{S\ziR )GmZs1un؟uƷ8r8ڹ\AL&10b b_}V)ɼ CYk=M־X c̽z)!4No€" @bM|iʄ/rC S ^ 3,8vvҪ ҪKԌf3Q$nY#,.[202'?.jbj6jʜN/"RM1܂qxi6 o_(R#Ƿ> <yFe`Պ&Z [CŰq Q( FJkT|(èGYPmcti(cL.:GMS̀`qx a#-Hi|H-Ps|QHm`x*5CT`0Ve֌`㭹SqknÊMdv[gs3 4|k;ϩϺQGТ<7J/&:JuGfvV?&X@X;*{t;ζX^oK^ 4C_ʧO4{~<&UrNo&(0/\g 鯣 :NWdO|z]_Lm+S7o^]޽:͜$ٟCi/ݓ.~moׯ;;izgƉuooxM:-6}~|`[7Wάrδp$Y=u f/{vf<ucߜ&eϿk 4@Rfhn4{W۸_ff't}ȇl,A *VmZIvEJ")Q-Ab{Uw@~C~"{xx<ʛ~>=f7]Νӧϣ"9ưp]r s9Q}0o~ʔHM?҄  ˿}7^0߂~!~ӂ #3\}s,dA=KktW},r8.M9zz\`rw;y-iD Ix`$%!)*aJ3ۅu21N*D\I~L?dS=g@d5FשYr1_-# Sz_rr3L1#p 'vۿc5bBS 𾍾 7HrEHk-8/\H5gL6+Kb"bH$J˔$}P*BYDg ;RW LU1jKFߚ|`3"%,ٞ`H 5{kV^!o2V䌣\JAץk`?2] 9o\-*"[5#3zTLyy0REAĊZ(VT;ogH馑tSbyۅϖt~2z4^ǧ~"~ z}FĽdIh L3u~ُ~ v)+L2 \d龿-+ R߻rϣlK<>_r1<%P\ ƚ!^*PqHP%yz >:]s7y ky"yFcT 21>e2 Xۄ@N*-AƤ>h&1HFcȀ\g'F2w]5m b˃A VD*pD}vrńkW|2|+[\kA<'F{yrDd7,vVB^k{a*ӧ.yF!xWfz|`U]a}H3>^q58ur'NOy.-NQSq>ՊKm-Տkh TX8;*W'לi[\BʨjW('8JPcx|(&ZTwٴQ (*9T3Z(7< i2*&mFI WQ-4ozBc*Inp1"薇'1O5bYzĻ|.&eO6Wb!3>$`+]HI:Oji4*D*Ϛ@qqي1xN :5u+u9\wތ~|bE'8˗hd/T#o4R4Fι$2BUmZ'Վ#V!/?>=W̘MMPkLF4aJĈYiԁ2Xlq7dW:91UYura~ ]DĤ&XK,Ӊr&MDƸ!a9!kY6{#YV,Pb2XV`DlFk %bi]~g1Fv#42)9"C<>Tag+8rX=Vǻ=flg6Ǵn-0f؜2+.n{Jӌ:n(sD"FiVg֞ۖX3K[ΗsVܣ_zN)ynAt?~gP)yk궏?gՒ2&l'=4bFMZkָĤ,¾.Z}/*pI km&pKQڎfNfU0NS $sϏ{ eaQ 0xS |g= čT G!tv&S0IgUsK+K @߇L.Svb6-eˁٌD5kNxɠ&MI.%u$]*K nVJW)%l#P]FN>Ee/,k°l,gc B%ydurΏl9#օ݅*\zq# Ű u;sQM03 G2d $u1rn*+7%MxM Axa.Q]F`Q x6iG4buw)Ĝז5}aPR3]: yy˾;y eEsC z%41)6Dԝ\6ֻ41 >ō8]Ym {?=<#0m\Ň5lc~:bt|H> (^<}ؙ/Iz?|:'Xq"D S^$Zlz*r&JKɃoQB~CFeDiSO$v2k%g{AGaiwްÅ8"bm6ƇK7(>уާx+)%8SXw-jNR4:3vEcTwg1F$z*?9+--B-/qWhF_[ggFrRjE= a֊6_h8[ң%w knHN"&Nu. ^y&ԪRoz8b߈[oWRI#VIs=ZB<[qQEiA.È˦(vdcpήJvY 'fE{}QknsWֲ!/t1Z/>Ckew8?C]-Ͱ$JI.!ǚ65J^oAw 1um q1Mj|4X= MqdƝ7p~|!,e墨EOo~MϿxd|?JSlRFfI.Qid`ϯ 90|7R/nW7WY l4 Qj0q%Y#^@WrM?+HH6)rUC7OqFQ#J'Ao+$j% { HwlGAeRٌt&_DnP/f_L}:.KEĤb--H/}IoEPZ*$,x};Lʥ \Q ırJH>&*4cHS-{w97?77!lpOzn{UtӃ3~z~AO=? 
_3{;_x]>0F\ty{55J'cFŭ KG2O@Ry7S us\6;΅oE,EZz"AHR bq xA\1\R8*Ϫ?|hLd2;dl,Ǻ>l/[11qEfWkۺы)pA8wlk#s=q9UBOST#zv2r \"LFR<1"Vz(YG3p]K ~ R'p58S) ڮg8xDZC{R؟JapL,aã[9qmeW2( xe`H-c66 c!HY -.x $W7%pR8NkmNXC79B?؛V6&:Ր@xq?=!.Hé/FׂI:aĈ#OHy 3B@* ߧ 0ElX #@{Hp09kgtSTI5؂$#l]tҽ0z*S%cٮz;RX5w=JhX5Y ]0B2J@v5U w@w<*PKAms$QR(χ3'\\ 5"f;`pMw&9H}!ԤL)D<C\LS¨=<Ňi ",YCQ_Wp{^v&\NOWC~k9:;q h$!\ޮ= ހu\j_.sW:qIZ" u}cR]*; tEr02NGzͤ?pEJ-a0I)b^3qIjmkb Q.l3oS -~[7@r_:Z^PzBƔ-|+`o+`o+Dr;0{lؗYN8X),%#ˋS !q $%Qg'l|v\K̉((Ke#0x0߶c$RnܝJw:gb@ 9"s?Svuۚj )(Q+6)Ny%$7IG_ 80Uj$>@?T\)ϵb O E=QjD$u/}̄agn:[ Oi)gKtM`RNp,iDjyh˶o7 JB u06rRlJ7nn`YRoZ6*"iydӢĭba&8L_8Prs˜\` H +S4rl̹ '`x5mfŹLx˴-n8pNJ$-q&BY쏣DB[rR(hq+ ǡSo8T.NepΆEy>d%/F[n2nJ8 I^&\ʵ-6"fh5b|=a["".xI6Uuvhرf31D<a8aM}\JF&Dʁ!E=nUԴS2HHar6ʼR%{g@ yE kn,d/IN^7\Z>Nf@ӔlӨqw(x4 cwk#wPèAl5mqi4zLa, $|@ ʝ10w38NL7~823|64nW_ìKzH8 9[G3%'j]ī,[ fS#OL.01j-%2(z6vzTcNSr ֱfևML?_*iQce9[L8ҥҚ_fk^H1 3 UNՐG-F5A@7:Oqg!7nhiW nXb9:22{TWZkl?E RNY6,~SYphrO^/ getl~"׶MF+/Z2vϑ>H5F'ޭA89?^?Mk:vtr6[ZsݾkwAcO'ma?M17=]=|Ny~tvIt:ms \ ~mE;myu{AuJpPFRxQ"hkنj{ʶoDN ̛7oHH=g,oׄɯkqgw:ٳepkw эg"LswԚ$≙.{ݧh8O8Jkꟙ ܌qRrQ$jlkzV1 rnBN)@B]'? %8]T곩V7N£pԄit=cjQ7Ҁ;,>)K3ҭ̶)3lf U7/N&BZ$9yVz3Ϣy6ŚLQߣ㮙tXj >?^u{~ucg⫷W zH ~~14^؂Eς1Ob~v}3mw>1?fɭ?|"lZbRT~b=U 5-?6Gg&nYO)<1ꌢw­27tLߥ<`ħ ݫaNor`([v={ͮ~ NcnhpNjY2VY*z;%F]5_4R? o'7#`C+i*#K^xE؁f b%8>8[}ge~`=!%'G`$\$墆$FXM56"Mf}'K$ZU@yYbŘ2Jbtn/N;qU8oΘ1:BjnfdbY:ō.[ ~PJ$Ǖ<9&90f8Y]VG͔9Dzscs3xqs,]sK#aq+aB=nR$*!VܱDVV,!nU*Bpx TkS)t7PDs:c┘~|s(B󡸦~Pwp"D`dS^j%μ^[WqTԞINPl=7Xbˊo^rQh  ӝǝ`'zuq B$6es{؏BlN&0&bQbj 5sv(j: pk \RT2Lj"ep ʎzL0K$y(gԸnRvj PXNM^gHAv Xm3L$%Ec9Vsö{8孉nv6-MMͣ?&yxd0Sqzi]P崼8mmF1J[(m7J$&WeT?F^Sd>&CQvlAV>/u|l )_XZض/THe\ >w H#0vxGߨ 'sg0DJQc=Δ5c}Tx`c R3<f*b0O17qjAmO?vőű .@O .ik8htX^@Ьs?`ɔUiێ=t:PC.e{HU)S׵6{=}5nHsjJToTlK3*Є jëHgC?CH?K*?hT cvЂ k9#SG mC !N y| `\9=\zVZU!H3N(|T@r*g3$G@rf"4)UimY~}IA :Xr9aDALgX4*CZ{܊c M0EDwLηOS-CۻW5><5EQ _A}K޿mcU&C<38Ts)8>"<sm1N]Kc9X>ߟ"@N4;U9qp̓)#x7A6bڠD Βe~y3k7&~yH+Rd3%.m! = x<A>~~\| D%#oV!+=3/>cqzl)3Cx*!Nfy`6@*evV K)7c\p^zp+UfL3u\0<:awt*ey)l'\~\"Kl{B.1|"MQ |P͵dB3"(Lw,l34iϡ (D@,ogI3&=hvzmŹ Fl{P"L6B6!U6~ oVo'+W8aik2BmDInP&8/{6r>l˂CUo[Nj0Re;[߯$Ar@"7z/꿆Q>f17$Mti.3tCKKlXs }PY8;`m"`y ꏼX9k.p:p'6FWeGW@z՘! ip3 < ;brV"f]^ ݳ3볳;߾zq0Ir.hTlڈ,Xc:f#>ǿ>7! T=ܓe[7dtYy|=iOXZt᦮ 'yqc²"1v6oO]+ר77'?­^m><$gTۘqKnœXXL}X`b¹3ZF$Ŏr65%LE!kcإ&3ŮѿؒχJ6Ԫ^躗bf&>_}5{N&/^A: RP*xXqmo}[69/LAitb`Q"&I-Ďհ t&<% c`)V1x{2X>^|Tx|ޒ&yβ/D x,] )jo㉒^JҁPɀ bO k=I:^^Bԃ@"h9iʁuijOش}s ʎ'gaϟ~r3xb~B]6x:܏i{qU~ZӢ|AEG(u1,w #80`O__As.K,Y={fQ3+8r8:bcʔJIj51"(-iU0L8a(e+4NwBNPtU+!Gd!nA p#F6&VQgVB f N$'ט a ce0KRMT%ܸ´j6E6]"|.q( Z:eBf"V昤H# 扏W ,ra 3UÖo۪Q* Jru4[o>qcֻ.OlY鳬Hbt/`,;as$!X xM^Zʏgk=  A8Z73?#˫^:Lg 6Ͷ1_8)t/n =y6pݻ{gS@_e1ٟ']Y0)U ̟;;YT~P+#hXMUpG &O@X 2ʈ溴p,7vN.-M]fw\HHbBAr#f;"fdzEKN>`AER-S&ӺğԁF긲R2Nb0bcC h f/ A`)}?mX?'\wr ~ ZUKk_DkkH Tݢ99q%8wtZU5ɷ5vag!<ښx#)仹Iawz$× Zx1nw{qΐ6 0Pׅޚ!(î;.j9jT#&_4|k5(9ݹza{e[d15l+Q%dN^qsTۼ!e#k+ nLRTCI0jӞ(B̥U1pdLo5T_)`y#yb~9|?LAVU9hz{`\pEjdLmyHTH`M 4jcR$-!)8֚4MCBk! ;qb.TS洤I16R$DZߦ`!@*TZ&$M&SP1w pkYӂ_&eB◂c\|Ut0{lmI  \?qQ.^^kcjWkj`iիjڔliEBX;(A)kdb`ppkc$"Usyr儹Fw*e9Fi&]\=xJM4\ĥ=XBȅ†@CsyC3. AHL}+ݘDߞEس8G͝J jsT uFb[(??lL)QFscќE4'hhNDsJpJh{s֙XLF]e8JŅ۲GTƵm~%<8X*X|:nC!T>pՋWO;3-ߑU?~@-/}A)La>5o_Y|9%ɮ1Bp,бX ߏ% vm8$ q[[OҴ'[HsAЭ'fх,YS+D؏e 쉰 8r(oCqtJOf5~jR;; Z3zzoExwd)>(E{/D_d>rU "(MN0NKbG jcΤ!=bɝ.`L6Z U\9*; NxS&qCL;aհO݁=D9n$MlL̩xcSo*,a^:~֤^=[sL*IFMi$ך;` 4#oUjBEuyԲh_.61C\hIU;ѕhJ4/VPPT֗Ri)3.:36uGF"G|Lnɕ8 -&$ CaKo1| ~-Vv<2a(*aty>]cU b̞k\0h'o|j׫@pFϚok]_f`"gE#|ٝRB1Fk H-|"e!^ EpP0h|ò͛Gp-q*"%ƪA+}BW7K|W1RR}:p%ʁ<$s1o4݋W;G=R7 t^y0 sKA,P\H\߆ec.:!j{&àD#tA(˺S;/8ԧNɆs;'80. 
X4R t~O0s]4p1' t\rjjXE\8rdS%B;a bPc,)VTޢaՀW7ioLm_׷1?UM&ϯR_n<;)0٠rbA'p4Bß}?'`q H|Nn0׶Fe䏚⤒G*Ej#&biv ɟ~Kסf]/<Ta2CbNX$(&3NGZKŖ)-Oӻ+pB^^e^ht!(\u.' _B\k{ܗ!L{xQ9&2d8PB V$?\=><'3xOgf67S M/bn!&f_d8 {r~ ,iôfndS~#[Wg\Y0e9{:%T]ٌd)%?,vT^fgϚY"Q?.+53;EcyiU/$)w:* jeݻswdl$lʕ[YQI_p'zp[3#Kһy{=kL< g_n,{o*7#=|^0/=y~_1$c65yhkmȲE z? vH#|¨[$;Aܒl^E)A#ݖ)[Vݺ59Ӈ9oxʵ&NF%XoTKHY,ryǵ5W3Z$JsD^7?TiN݁E\MR^ެѤB>3_n; =GXH}!M;~_#@53n/Cbh6!r nA"T!E;LA,QsJ@ Yll%:("4{|wcYlxP,vr&;Y[6(#vVK\se10Sly{4jْYjW~@\OgzaHN>ͱgE(4Gꭕveq^cI8> h*ZPo 1Dv\6/:W ,#~IX|TR3%'Rh;%iqp`)ǭ*Zc * Z 4 j)9uD'HC ~f ud JHW2W{2Ij9MHCri )VE\i9z9-׏A@=M@Fc&E*Vh.d|D༇+4`(D  sO~6ՋBo )I ,X <>Q׆@SvAi* R%=j j۪2,Ct XY &6A'g'[/ Q(!n qGuVJNh-q ƌ[lA!ҸxZ-)B6(FFɂ5,al]~:` Sa\xɿ͞9Jսq;N1ZԜՠn& Yr4Xpe6r""$ASEx@\ZSbgY$h ЎO8tfC;M=0"&˕b"h x \bd1Qi/gŁGwn|8Gu DP8~HŰNaRu⁌@9}PNߋQ+.{HhIsTxTJg}"qBJ$L5G")1AĠ-A@;'PD5,MPnALՅ׳֮nWF=G"W:Ff%F 98hQHLLXu=(_?aYd ߜ f@tI AΝi0"Db- `$P#YC3~bg_)gm;( PD@šڛ&EaP+5GxlwR&.Jvst_~91[*0n~3էF[<=LҿfUOkxBD*aCR!˻raxZ,|hzf NR5;wD~vj-ׇYo,: ґ’ ފ ?*Q+Ca"WA*U{LbvpAIüci T{ 2D&xb g6#AYdSH)7BizFs}lJ .^휢:H+f XnǍaMaЇ=,C 7MLyg#>XU>pS͒NsuŘҽ6.4+;ڒJ-"ۜBv@t)SMB]3K L0-ʕl@( C3e 57愥&TDaֳk=H=Vol=֓TdєF[57eC[RD`TYǓ_ltgQů+SUl寠50H>N}N (mGu6O "@#hReT~銕?nZ؄\iBWC{ʓ {诹( x dQZh Gw0mURT&) $GJJdcZN%z5ꇼ 0u@rR2>d_i(#@*(:*b/_1Kk3{ {T$+08F{y3+qY6Q ;S8+` #r,H~ !XZC.6͸ :#*~%˫m]b/7 q eEY݅W[FDu MBbN & - SDz""0Ru!Y<9Z[DI# "Lf,nn{(|V#k ,#Z8'ͅ*t&q)#돗 E[a+1}7IљmL~DHu0I=NQAZ[O&Ro~$t2B+F*>*٭К6WlEfz畉*fJIeҶؕ&#ViV@, Jް) YuI匈1w~0PS"iD$(ȠMi=Ce"͆ PRhEfõaX]کڰ@k6>gBLJJ貌yL=̱57|2p2ۃ?ukT9u \.MPipkH?FZai; 2 Ճ,޾X3| `$YvS07_,]̟69WUhx]px܎Mf gG >]wv\Õu%#6ڽSΈmDhE"]bquHKPdR(Js'Naa!p3!>Ip秧R뫻yUTv8F5{k`IncY{F3 !8ziQS`cId ȵi1Aжh/\L]\.̟c=lq_O3'j]ôCq{锂~&5?(M_oW`8rpɅUK [,Ba6qnH*-&Q)vTÊ}o6Jݏ?~71ϩwMɀ-<_{kSߧrUsEv3憤 8.7iXjrm;bnCkR- 3f1@r~4]=30 t{0,h99iXRf)inhG=Q˱XY<٩ʇk s~{w"b>YZx/s*{:+GZUO}bĻ? 0LqߡwиT6Qr'ZfoݝQX*z>r T.bKU =%!>nSI$/xiqrs=),+,>9}`ې2ުOM&tqˍ&iQ7kW])@1֬NcWLָ4rx !6Z̪u"hy)>&sS%.sH %cNqe )* abRDLT" jLγ& 7*OX.oJb=ѻP JUP.cƅBL3]oBn^b&Aۙiu,ivae-⭲mU˫,Oo'2.*w[a1 8p?xnI*Ŧ//hcs?e#WJ+ZgMPjydo#ڮpk0٭܁)*RȂ % #+pjӺ-Yn0ۑ-=X9>)k<UxwPGc)Pz< bwT6T?Rf@О{h=F}S^X*v' sNRP/hY@}H^OCJ 0}6e7 #moh&Q-h犡gHv֑E ~-QXGs"y/JA tQ?͊¯\Yx'* qmciA@~FI#vZ49\LdId'>FΒLɲEѵZ 9<;o8~n$jxC Vw*'j)(kОc V9T{w_HN! ̦qM lи;5~Xwnh`x8y=g%qdǓ۩ڝLMr~8̒;`t7?qZtz|2a]<BFe& 6 61s߹a+9xOo Kd$ws֧F_ܑȧl;W}S+yh>Dq7n `Ɠ}0i#?b=eM5Xt#/+ uĦNxѕL:xE輁8Oɟk;f/nHNO_I釃ޠ`vN9A@;rF*11`N)MObwh{ HvN\Nnu(' #;>.L~wvN"' c 1;_BkF xlr[x>t)ڟ}_~9Iǃ^loss$>4%lT NݡufHo߾"4w2rIE{~۸78sh5IMtepV5χ6j]X7l-X@l%"blK6thʤ{ |jawj[yYI~ rbG;O֤yxSds5[\e_|w{YO?Pv>U.ld c˙J۠GI5ӞЦ0|[}Lo䆣骣[K[) BCuUsՃy}zǔ4h$#wwe 8aZ"0 L ǹF FByBČ|J ZI vKi>ߢο؞]w2?xFf7_"S9SA?O ;OnIOEAࡐZAR5l!isǖ t@&b*hR\N.`4Z]zP_jo$Ԟ.Yjo d%DǰMUc?]+C\yBUWNѪtpjx. N֞!pXϿk~;՟pQ(X;>BҊ(olLZaeR]c+ yżZ?D80JI [wmDdo-Kq&PQJ9/-6$W2PFxKX8\,e;˩9j44V+߇!lqBÐ7$Ђ FD"[b VY+(]MpF$srTgfFp2{ X呻3}.RB2O  .ݲ ^$m|蠇;iq|wg+}f[?"q>6>23\.|P uyopV EA&>٤%obyU[PDQQД6E+mQ/ x۸)QDAUp&szQTz܃u .HUυ=j"o)E[%Vdk~TDɆTd]FOՈɹ7QϚS륛4˞+wLrtQ>91CDP*i"KzaGqtG Kə9'C,<ފ<ER]LqdVcM"ґF'"HO%?n.#S7Ng^qzFWU'5ApC+_IKOM@dQ+#̀{/"b:zƩ4I(qS$eX(Z=M[Dٚ[y(‡Ƽd>v NQ]K~>y tiȺx(ݪy^k.ѡ[o/b(EIV3+JD](qj*2s8nѫ_-{eY1?LmG~H}J:%p)5Nzh줦4>9t@mj8J ^Ijr,$%o('4b #0 emYc1Hxs1I3E@ 3"$y[7jRO5d BJf MذHib!_=Aif;%;0jPud&F%96ޘYVMnrCBq#{`:ZenYp4(hK2EHJ JmdHHr @!Ah_"H$S\ckmH"gwT".\^6 jk#Q )ٛsT !5̐ r]Uwݺt͔Q[AQߜwig:RSt>M.z_B[ \,UՌ~[q!hzǘ4{p͝DVk%3!do,f* #dtKȽ& z/Fz;pZ1wSF+a.̨ +=QqᒿϗdL+u&Rf*J_Z/"3L:ďZzNElGC+W1E'3ymd{UkhqrCv5ƆsMsNR a>Vr"^U\|]L_owI"0ccKz qe!L., D&3nedb#%dG:o8QR|Yby5~:8:'m'ڀNא&є;dd,{cr7'́D!ŜRXAx"3XoNG?L&sק>',>'-FYٝГ /fhN<[.  
6ӛ 6QCk؀}C2U`d_폮[ac$n!b}-[bZ*,֞ܿX#anEgOnX/x0x,qZ1(&;&TΈ%<(RU]\4N8+9hǔ3 d`p 䱔vT%해92'd>yd!̭DY$?roKݠ[ tS@ɟyψeKVVf;$Y$u#\(eYj'kd򖛏E ?wwa\.8q0_po>%`"JluHC_(u L 3BiEn"[ou|]'o>~XYU-i>N " 9g]woFӫQ9?(TR̙u)++匯h)%VZcCeȺ@Ι !g49cpȞ"D. $PD#kv +Q2U}0%_7~qq•N[ ) ,ĜB%&y:0:BU$= ˲\YGvWASځ`VvmyF toR ,ܕ˖vӘZ}Cʿ,\ Gu'Z-?oy5wث4=nF Аkcꔑ€\N@ʙfVEJX#uRn˂ 2XY`RV%Y|edV'i=d`$;u@E>f!8üv"(4ZskUɂe( RE:6W<7A{jBElI3ڐIc-ϱÊQHer2twwr*wr߅k ]*yd~Is^rV:eA1^&YVT &F% 9Z2LNP<,pLAĽ}s2 632fݰB3gF3rDesTHYxYW+m­YCZ:sْ Iad.F'r Ҟ:&G!sѳz4ֶl6=_;NxNЭ8u.')gJl dEI$Ax4v@O3^B0or_CWC6`v=&_Վp Ԝ2U,#ްqY}f(_ >p0\݌k'>㘴ӫѴ|tx\;XڭU9S}w[ EEXDR|SNqݻާ둛>LJnda;P L߂l>FNSgB=j^9U?(~E3KW'i"$R,Bb"2jw.ͷo?šha>+ Xv?zD+9ZcUzEJ0_-qHjeAzߣisC ׌ZAPxWf?M+ʄ'.oMslv-ib1ޣb| _l9j nxP:7]@o t gT_$D@q_RLzص%_;5'x d+:%h SR.QfS.VÉ:#$l'9LƟj)tUkݨ5!SܣWS2agLځT)åf5lzYEFWF*x s܉\78h1WI* |ڊ;3 Fǝahz]г\Qn4U_)r!|3+WHURL+\2Zᒅ+ŏCk׬lY)}eJdU1(.*Jl'rJGg|֩@rS-hZo댺RO&1M9VocaHTKԩUTY9dY ̀/}br%D (T:&U- r[i{u&[*D   }%- )+&\F D$\TDQLѡKZ8Rmdrێ?120(R.dLI,f3h$L`T;!hY@b9CВUuĤ+m4wN@ri51!{c8NA%a.qbB,X z/d"w^Y":w!~gmz+~I!_w{HȇE]n~Af]lّd+JՖ%E-Gr]|X*@vZsKIr4r:!lx*qq- '|"MQ!2+p0$!m.PYc|jG֢Y,g쳏"%EQ`6"øN@eoP 44GG#Oՙ&J2UP3hF 97O漓 4<2 u*e <3҄\w@C!(} 8k#i/r+ ༑g$#ݍpV2 ŘIZu"t;Ms%wc 9‚G"Hn]3q e*hk1T]|q7d;p-1"| *O *0 OY\R3Ϝd䨐dBs\?ECQcmPRKTdAKS I?,'.uNs(թU@Pb7&; ZP`-LB qĐ%EB\7DR!.Lt7_$^ۃdN!brI($2eu6J\^:%lKHy "'@lr8'gR39\R0=%TYEŭ$Q(qyt ,'ZzPkdfٳ%Jy<*3(,o"r[Ɉ·h(*Y"%)Su9a,[qg<#Rb\gVc ;.pcDi=5 uZL~JqiŒ0&F ;5KT6Q,b*WjT)Jv ڷ4 T׬ƔhS%DESi$H]xZu~:\vo?tmlM,,]hԎqfRcۨ7Oۖlܽyq~^N-%# 5hQ R+ocˇ҂{(ڦ > _W:&@'u>y& ^ּ]sShn `v{G 'p9T3e,h!qE"Ss،);NK ;(džf*ٍrenxY7mJ_‡w ^߅`I*l_eonf dCׯ\40;wH#̈́kDSH_<:^ nJF EFjѦ?tL!dih߃ :ewsxBU!.dX6uMKŘKő )#yQݐJ̫JJG @Iz }gtY2Mqh]8m~NÚykoy3{׆J{)}3Iu03Cs|˼xYP^[XUM'wE Atk%3tƼVydzFMm2 <1FD[iJ Yj_V1 ,4`*~V/<%UI<MJHqS5<(mt `xJxbi,]P3!\$dt`_UP3Bў/\B[7w_{dYjX 9t7Ńvw.x5[_{W߃7儋Gq)K@&'䲨a3ʈR:J= aY5c΂VAv8r#l0A_Zd/j*0^[=6J"CB D˾+ K4ORpK/L3q{/yӄCŋ2'` dA  /mÅr7 WN)؟5&n+9"4Y"Āo1,L(3MDtK$>zQ;1C 0hFa  s:Q& w rh1z7ogMMb |xXl3Z.5Q<G9nv盇tQ_hM>ـ=AfO2^ɯ{g`98J$?EkII!] 6A=wz|>yZM4BjpQMhH3JҸwܱ~M,(lrGT*SZDϾܲב Yy7NZLAvPP#qþPC) M(@Ǜmh@M>?[$'SF&6"W5#REiw{UQjPvrbgzTh,uƍ)XL{O%rk3{AЈPfGk|"bFiҟU^LAe֠]C[1Ԋ)E#́}vaꃧ酞O9nVKBhil[޸]d7 x{Zgg' C1ǫ |Hnn#-yw約Tj6,nUn yy[knq3O{@P S4.OJ Ww0%ߛ8Y(f!@ˡޗ#cC=(^ 8C\[(%%y3 p7pMac*q,} ube~ƜQrY< D ()&cm\gL&:vqX *R`  hwx7ŲA+\ܶ}+SpqȢ W#lދGUEB;Nz17#WwP_$=PqhRLܛw$tw/J1c\wo»EP|v;峇+c;Y}:C\V-K/Ueɇ7Ю Ĵv!e˛w全W]xm]]`:[;:[;+2ڪ.QмX^x ƴ~U- %}WUEDpzsR@W^,XمWp -9:4x~>~9QN9U#UB-R蔭GpVgTq#TJZeZj@XMe3ͥsB#s#\Fo]fw˛kٳ{/z3 7#A.Jw6˷8i|h^m9h6fc9c*[߷<H1!y+ ׳is ^Z ]_yȦ;\ q>gO86Rʡnϱ{gjS:DMZMY3PPlc|o?&ZNC1 KSm TxFy2C CΧHA~H#ɲafNs)PQ=E88g~bg~ SRX"!V\ԕPCʎ&Ը$,Og6J%D>NZfE^N p . 8It&-k 8!UG{gk- ϧ'gXF^|>(z-o[Bی[bLI5JsQ b*U O*UF ֩JQz $EABK*3Dz-*o T E[Gf7&>-DQO,Iʨ c~CgWJ(=^NJ)\y&eHp,M9F7Yo 7܍;Dau֡ƾTm]5cK '35$c':u?.f E?ll9ӣae◖?}x-1U(D?=pŮ?>0eQ-6_!mr둴{|0HM&o?~{jOfzf2BE@([onJ%\|5_jrqzt{O%?]O#HGiC{Y|[F[mbLje1q?%̌24Ml[˽uua몵/!A*YPqHTLXjH.F^$ÜRƴ?&d tYoC`Wo/6[J偐˷wVof΅‡hf"[Uw>o31UJKShgJHbز27wdDK󆱴d#Aku `jtQl МSvCgmYMT}^ xN\P-"܃눼(i. em& }$,L.Lh@5}`BkHz˹6-JOEPMt2MB))%pD0S 5LOhdI肖H"J‰5"\> i|4\)/}p  (9 8r|Qlς]WbAH,~2_ߖL\8s̗WqoVo/>lj6J1YVJѡYr[DH-1D#0B@AZ%EP$QSwј=:4mB4X*'DKd$h8>5 In^DxNkv $L3 a+9GVR V0P˅.pKg>aiU"g A߅% ^V{X? O+. kFSQ .Q#3fP6z7Lɜp/q L?̴\j7t6l@xe$v1K&ІSs7 2C(x;B:(ȶN"L NL7B, =qd. Z,ǒ[:C4uZ&frf#L1W|YNʭʕ*Ŵ![ PixW3S3\,A1u)Wh(1ۀpXʀksjx2Nh1>®2MyLM҈K {ts}!2KPXD 5tKMڝPPT$KPf2L:HB4"0c~w?\IdnMS)FgWjOĚ *ɚ &:²VXpjgYT!ep8%Xza>jH nWzo˴2FZlSW&ƥԩNX5H*(bF@UqhPIrh/c)zɩotğr. 
huқYL%x~v0.Q}P.,)e՟ۓɈzhԬ#ŐPfQJ8$G HB[qJx8`Z6ZZc+GM 6)k#G FE)s4W .B$"oY`k!%xXoPD{1 *;K FRXJlmw5])2Br5=҂PTӄ :G("1)9XtC(8dC,Ȱ Ou;/W^7/S բy6 -ߖO֮ځ]ʧ%m8D &j!I'2oN mSol{T9hR[s1M9CU0p5i5"0=ϼQ7 ͌s~Tifw^3ㆠ:=8 OdPl0;NIOj5L/˴4lD/3OVh'[}䷳Y xBι|NAdz<8/.4κ8:ٓD_Cwo x8'}V[mmh7!rfH{ '!,0D2z  G;蹥۰3x843KFuIO #A.0a'Ƽuvt6q^ +U*'Oe<|^rnF Ý76nϋ;[\Y5 9 Qgy%Q! pt8Ǝ`$lwPQy0R,S0Bji/g;P>na^.=;|]q7eD_3 I\?N6ه4A8s./Rvտ{7yU *ʁws%/L+?5i@o<,ˏ 6\N$/v''ʮ}Dk $.e H v2yoso(2a~s#Tk*ҧ:R-8ܼt}FSdBfJ.2UoY 7Q4Wo$y,G5 W ?Waݣsk(ZoiV$,qf x]Xl///֫srg/_?^lJjC껒o}>]dh묗]3}yoNtaQE>|뛺օv(8JU*?4~N+ ~jԤ0z't .\bTJXΑ nXͲ^p%e`!_̓Re܉sA39h>Yq:?iy0A` +L 62bvgHb@/ 59|$h5cDZABU_Gjm0HQ(+pϠ1PStP_\jFQHq&cF)OxjE+iq; (E.Wź]#-)!"Z.=G8 H  (,!",$۠X`grNk*~ ;"rl9)m2].J6l~x 8l2[|3.ir9C߂^[,ܹNܹNܹNܹrgs@}EW/䔊]p ipJL{Lfq?aGn{U@ v|ae`^"-Ms҂͂IY|tIpެn^\}2^m}հjl s6D_|^r y %ց,3]MCM9v]!˻|YChŀm@7ɒ}޻)L_[챷m~Tg0+>^kf*`+6}v\My~tOp 'k7^ͭdn G%ߎ8jѹK3R3.T=8W 4AbٽunР{CQLp%&Z~m~}`FHN{S`s_Qt+ o Tsq$ ܂g9vFH#FNhd EխF:s պrީ4UgsmaٵׁJiR=Ơ(E[CXpFJg ћ4"TyD&(VlyS}#m`3I(T#cK#h I#@ |&rD{MBrK1@& nBя`#ɎZ,TX& %T$˴`!cVcd"yRH{tٻ8rW $, Lq`7M-du 0=eR*VץcKjO}yx.<9 GѦ'vӒS3}u ̞8}̐7>E)!o| z:bN:~̘)9i/Ƨh9h׹Z{MJǠjLcn"DBn_4W1C=zN) Tv[A{@Nj==\<w|$2tNj \Tȶ‰K> rQ)ӂ0]ƧrZQAeR;s)%R@# 9 B! *y5~WRLhऒ%S,-O| P "P(A2t8X8K2o 0(h=ly~мkBsPw*TfdZ){IR!tIv_~*JQdՔO 33gF!F)O$B2(=8;stqt \1.T* ;NW&* G?F^ %iΠ9O ,7vMVMӦ3zFeb.`Ôm|+J_Y4SQ(+C549#.|Ima;-sEK ]\gnu~X_7e뺯W!" zby]N0ZO靺Ktt^ ^sO|Y>ZMnm}jsz*xF#'C$6a\H8|ޟ_Rm1T }5:_V-ZA_~'gm&>>d`9v=R0~Yz9x< ѵȯ~cgCɷ]HQ椤hjNNnMmRHJ x@[~E.<ҳc_L^MUُJ}2}|\7-Y~DF!j]DUÓ0P 5{|Z!˷hH$A RiqF{tDʒsɽ^*q9?؇c0+,>v.z8#ܰ5_EO'Jγ].I}gO5bcv=}3ҙO}c+獧;L'9p #]10;(Nbe!`vۓ]F}js|UQ`{F}QP>om:z$ުjP2izF}<ےx%ח  |dCs?%ksLEm[M]]eUI:=^PN$=U[v3.LU<5ߡ6bIEUQ_N]kHݍ,LZQP\{Ty#/h aU*D1Yp )ʂ[nn (RrD !**ld`huT d2D/uUxЅjąEAb!g}" 8t׶ҫ 'pJ;}4ơ½QKiT TȤx1Xƽ-ĺzйc,[?uX㥩fP7T;o(NFNFunk[9/]?-7/I{\>ݮ6O%O'_ .KzɬE?^w.zK(_j60NOA3A,}[b΂t0!.NPIU祸 ̈́^a?_XHBLn"0vSw{u_H~G тH. )Кћ;)rJ&&+>H2<`IGCasJᴠ 6\+%G5/Hk,Hqg FzKVZpdLIamױX*{<@uYx:GKK>HtUW [LԾd40{1>tEV ,/9׉`Uvv e\TxO$H@Q2Aςza柆Jyn⟖~ّNW}U,nG5cL 6J*tOgX,'2*Kaqa[419_xd?c(OçE4d?N2c8u/:(ea8s~#o/VBp`v[0&H@ ({^DR3Wښ휡4 ոXvMaE f, FlQ*qM6ѳx90}CzC'O}mz뙒݇OeGJ0r9тf,\.ah{˧N -fX?9*ΦMNF@%w7;:賣LWeUEL=84r܆m} J#pPrӱߤCo+,gs99 "!}<I q Rk! 
%5clLrW&gڐ(͆&8dDAspwM7 WDԧ7XRh'6 $43޲V3Q K9ԳԳ̈́ { 3s\fؐĭEPlV(ڼٓڹ0f2f&h )9#KZQ9 `l%em4ѴH5fFQ33DrW/9&ń@?70=|cOo*WtHMG>$cCK*4>uۗ6k%)Vs,Ɯ[0kGbgW=EZ!Χw⡰vPV=}D[Z0XZiI51O6cE+z6[wqx 3RV5Bs){s._g\uŠ䵓oUcuX>I\ 73F}$G#`Ǜ2XV7(-u7V\}͸.ZjG F#.Ck< Jgl +\&B R؂XxʓLBLQ{fOر-W?5cL$-x߭gݕUJc "-gIWABr!.մr99=8 rPul%]ĩjF,SD\\Zhn"җ/Z>/ykOg|o׫Q 0RXBb@0eBLԌ^fleP+C$\,28&oα+yj(e1b%ފ0J:@Wˀ*n!ՆøA`BU%͆ng{|n?-m!&RKù^D*i'neI{jOqV zt($&/4,qf'q"ge^Z@Pv6pђtAW 3+\P8&I]%Fҗ O:˸ER32x>Fu9 ΝAx,)?ߤY5cJX ;*D*5ti6;TIr5)< (E9 8"`Qmgc1t<5ΊTDBXWHKP <*/L4+\ nD8ӧ he>@`;J\Yqەb0te^ L(,;S 1p)4`]d{L.fW>C"Ƭ`勉VeӠX%өF`a!\[㝐n|D06x%#^ rhL$֚S=$N6*~!-Z^c(aPUfLRn L0R3inWOe F&"wC `hW 3W ?=|{\L%3v߄se+'#צy-n=?e׼v<-/[Mޮ++{W;q O4[xw1>v踽 >*JZ9E4r }^fFzb 8  eӺ,lڗMlY|k+x]IQX,j=i6D9kkO.14$߾,hOM-Lكf/Z'pxp!F)|U)qi_\Ԍrl3>HmSe|ȏ'.&9CEcNH\ݝQJW0-/+?8 =ᖳo9B+y#G~}Y|T.nބ)n5\S54TL->::dxW $Ea^mPwlzpt--&M>::X *=S|K7L=`2<7f?`8v,}s1Q&Si,41} lB$;}A%w~} /JߋHj|YOoQNuy9b`ErLx]-4u4AQ)ӂ0#RTN׊ * s~z,.]'+9eù6Ӧ֗ƽC9 ׌qΐ;@WܡŃ MX_/W-Nl>=:+2UTjc}v-yރzacYgpFCSSR`\f8LJCzvLkl>L!OF6.teԗq"N==9pndy`sV*NMK5^R?%a+M o%׋ޫ1n#\A}N:O$(4>!leҧ.QU"pSy^Y$ژ@\$ŕNVkP?6\)E87Ai @Kzl,K 4囍*L`FSR3:e \yZ& ȍPKcUn)-޵#E^;|xl/gՎ2;bheJTAV]b¾/bī(F2An#sϩ՜ASq:Fs=.Z DᤊԊe;6]l4:LVE~!+cks&'o_ىX^eA8/汸Ɓ?O.tzJڌr=eq%F=<+ 'ɛ<{YU$zS_dȌfE} n]/7@|> 0,NoQb?= _1Wb;ŵdAg't)*n>~*bW7?0 #4{ '?3w'|»Q]d7UQ4#KuAZyN] :Gp`ԣ0EfFH<.ImpB R+&_:Wk"FfYVHvi8LgX7>gkM?<.9HSb L P }C[Dv #˳xwy\ $V8ጊ6ԁ&k!cb(OճVJѳ35;`98/NdDD4'J m\MƏ*e7̚$fv0n^;6Q"[")xmX+(GPޓǭ}%i{F45hfezx5ڼtΫ?z;zڢ\4nȧ_&xnv{͂5-#/?OiQU(^ߎg8[c vrgτ>A VpeA-}ЯG|WZx~==.1I٘@ݴXzI>",бEw .n&{\lHb;CdO#ЦKZiO+'=ӫw;qB}Zd8[{[Z|xQ{]cǠVhWw DM]#+.آ@3J]$-;Z3CRkh}" gbH(o:жR ,.:QggZgXq2˧[7Qup+MGgEj%IAG[gu -ϟLCJ5ZClLS[[0UvG-66330NNApbx\A{jmܺ&aAH&>/|6NX5 &i"g]77YI ѕASpֲRvsFQ28\hCKSîKY6{rZ h5Yu5jBsj Xk)KװJsի Co=5N@<`Z|K3% J\N@@k) ݧg:}zyVo U/*[Knx^ϤuXoxѽATv7h d$"JmWk U]0 Z὏_>Ogho_߰,OG)i(._HWNSmNF]\3cWq]Y-s;~N2υeT ,-Vx(i}8El[5mIå:NJ^ T;. LJOf9ʉH.y!Rj$A,;qTs s(l-Cʣُg/= JءgN&scQ^;H \QSrkC^]<7B%PF7(,hȡPd4Qi;=!6,q}ũtm}v,t)[Jf4KrddZ6W:Q4|!-G)Y&gaJ& BшnZxjI=ǐ g@qaDyp5ׯ^-q +A͐. 9,FZp6/#T:NXkn$$Uo3[I([$LMq!i:vRHKlى*p_@ː}Y߱sIoyGω\Us/9ŀyycnD.<&hY0 29)) R&e euxn[9c@mx|@" (I}gj\@0ĺC3pX>FUb'hUiRiFѽI{-,^I %C4j=G̐dڻlV%iDl(0{n^8~e#MA(Vk%52GODf>?;`/i3À*{<{:9Q+v{sSBl+pi* iyE%N ӊ~S64Uf;Y/}'"{`xN~|g; MWxC84eo3hkՇ? uu0s;g|ʃoE[/%uRuP 14Rn5I-/$Rڟv%ݯE]E_O/}u.vXH 8Yfzsko.}77|x7M*I#B3J:FJ+fq5Fʥ3 kg/㤽A"%5۪!w y}1=! o d>(F^! WeVtQ@I}>E;|>)TBCPBh1f[ .X.Co1$ܤȚE򒔄~]!`ܔa2?2)H}ɧ br"(x Yn S~}Ev,֕؋R0َ/ /ѝOiuV|)2*%Ŕ0K\M<~81o$ ΃!\8cm$䆂oOיy `DĚETF(,G^+oP'0.EhmF/q6oɿߎΦS$*OBJ}L1s;2 cїfP%cxh)^z7e8T~<_z@eE܌7/QO*[d#jV8a:Tв[i wu>:/_5wsxFz{BHYWaQww>::&qZC0gb>p 2 eEmaI+8~ ŗsn*ɬ+<b [i >~愈 Ej-yMqt8ʙL~#(D@D#$%1a;YR ʼ>=}NO73%1kK j%%H0&`Lmj2Vμ}Ѩؗ?Z[j#A5I>عtg`T  أ$(kvw!sp9 dlK׹ H)$Ļpf fd7t 9 -atz5/Ėoh¤ɳlPz\Z (Y,}!GN)6Fx*ahJ\7y b< FH4B, RF`"@+GExMxM{4JVemekgQhb!MI9DE-ZE xbNɤT QHahiGmW0~ꫂyV"QQ>abJE jaJU}lv+^@ X<-JB+=c'SmlM܆ˀ9aFmvL%ĔxBޚR9sV's2CRUT,z¨Ēk|O_|$&HqxxCj%GycG l&M01}[RKbK8C>Xec..&+u.,5(f UE# -b 12a} !Lr  ?u;v (XoE;?=p>[⛶ǂ.(;U?ͮk!pyt$piW~ٟQ8O?^~ 7&B03"9PKX WqɢV@͝I^g X:mr΂_9?_Cqe+X%2VƉ87\\%|X?Po"@oNZӥ8Sxgll*`TP0$$cJB:1Awwv`3QJ.TmpU\MK3s~}]oKwMRݙ|ҟvgA-H=K|s+o ߷OQNj1|U#e7_l-Ig?mȃV۲݉x>PVGPJ4a%@B_$__9owPs[ 7䥏R !Ytot:T9:.?\~#F5ÙYLӅRy71 b\+iK!˝XsKrtVOgUT E`@"p\] nFFuF !h~"$@5ݫiZȋo#g]V:fڂhmUP5K-S&'Ra$M9B%*U>d" ymM% :*݊4GJ"[k;wf@ōƍowUftÇRtd|:O˾zʍd=hi IrXCQh?1Pdd@؀{hD8zPdQ &7xe"YH\%\=q-[\V1xP8 ?VQB+cet")LWߞ<z}O7|xc.OJ6u_y -5H+^FvXנq٢:c<=ޠ{uC筐iImvLk6*E@V b1Pl<i(=eVh4 GX!qz&@o2>q;p"3Q=@@F Q?yhrRy_~;hq0bpǓN~[zTuoZ;s! 
var/home/core/zuul-output/logs/kubelet.log
Jan 28 16:34:55 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 28 16:34:55 crc restorecon[4691]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to
system_u:object_r:container_file_t:s0:c574,c582 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 28 
16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c440,c975 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc 
restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 
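Each pod directory above carries its own MCS category pair (the suffix such as s0:c84,c419), which is how files from different pods stay isolated from one another even though they share the container_file_t type; static pods keyed by a config hash (e.g. d1b160f5dda77d281dd8e69ec8d817f9) appear alongside UID-named pods. The following is a minimal sketch, assuming this restorecon output has been saved to a file named kubelet.log with one entry per line as the service emits it, that tallies the skipped paths per pod directory and category pair:

import re
from collections import Counter

# Matches the "not reset as customized by admin" lines shown in this log and
# captures the pod directory name plus the trailing MCS category pair; entries
# whose context ends in plain s0 (no category pair) are deliberately skipped.
PATTERN = re.compile(
    r"restorecon\[\d+\]: /var/lib/kubelet/pods/([0-9a-f-]+)/\S+.* "
    r"not reset as customized by admin to \S+:(c\d+,c\d+)"
)

counts = Counter()
with open("kubelet.log", encoding="utf-8", errors="replace") as fh:
    for line in fh:
        match = PATTERN.search(line)
        if match:
            counts[match.groups()] += 1  # key: (pod dir, MCS pair)

for (pod, mcs), skipped in counts.most_common():
    print(f"{pod}  {mcs}  {skipped} paths skipped")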
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c968,c969 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 
28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 16:34:55 
crc restorecon[4691]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]:
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c377,c642 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 16:34:55 crc restorecon[4691]: 
Jan 28 16:34:55 crc restorecon[4691]: [restorecon relabel pass over /var/lib/kubelet/pods: several hundred per-file entries for pods 9d4552c7-cd75-42dd-8880-30dd377c49a4 (s0:c5,c25), 1bf7eb37-55a3-4c65-b768-a94c82151e69 (s0:c336,c787), 308be0ea-9f5f-4b29-aeb1-5abd31a0b17b (s0:c12,c18), 0b78653f-4ff9-4508-8672-245ed9b561e3 (s0:c5,c6) and 8f668bae-612b-4b75-9490-919e737c6a3b (s0:c10,c16), covering kubernetes.io~configmap volumes (trusted-ca, etcd-serving-ca, config, audit, image-import-ca, trusted-ca-bundle, service-ca, registry-certificates), kubernetes.io~empty-dir ca-trust-extracted trust bundles with their pem/directory-hash certificate entries, per-container scratch files and etc-hosts; every path reported "not reset as customized by admin" to its pod's system_u:object_r:container_file_t MCS context]
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 
16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 16:34:55 crc 
restorecon[4691]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 
16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:55 crc restorecon[4691]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:55 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to
system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 16:34:56 crc restorecon[4691]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 16:34:56 crc restorecon[4691]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Jan 28 16:34:57 crc kubenswrapper[4877]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 28 16:34:57 crc kubenswrapper[4877]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Jan 28 16:34:57 crc kubenswrapper[4877]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 28 16:34:57 crc kubenswrapper[4877]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
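[Editor's note] The deprecation warnings here (and the two that follow) all point at the same migration: these kubelet CLI flags belong in the file passed via --config. Below is a minimal sketch of that mapping. The field names follow the upstream kubelet.config.k8s.io/v1beta1 KubeletConfiguration type (containerRuntimeEndpoint, volumePluginDir, registerWithTaints, systemReserved, evictionHard); the struct is a hand-written subset for illustration, and every value is a placeholder, not taken from this node.

// kubeletconfig_sketch.go — illustrative only: renders a minimal kubelet
// config file covering the deprecated flags named in the log above.
package main

import (
	"encoding/json"
	"fmt"
)

// taint mirrors the core/v1 Taint shape used by registerWithTaints.
type taint struct {
	Key    string `json:"key"`
	Value  string `json:"value,omitempty"`
	Effect string `json:"effect"`
}

// kubeletConfig is a hand-written subset of KubeletConfiguration;
// the real type has many more fields.
type kubeletConfig struct {
	Kind                     string            `json:"kind"`
	APIVersion               string            `json:"apiVersion"`
	ContainerRuntimeEndpoint string            `json:"containerRuntimeEndpoint,omitempty"` // replaces --container-runtime-endpoint
	VolumePluginDir          string            `json:"volumePluginDir,omitempty"`          // replaces --volume-plugin-dir
	RegisterWithTaints       []taint           `json:"registerWithTaints,omitempty"`       // replaces --register-with-taints
	SystemReserved           map[string]string `json:"systemReserved,omitempty"`           // replaces --system-reserved
	EvictionHard             map[string]string `json:"evictionHard,omitempty"`             // replaces --minimum-container-ttl-duration, per the warning
}

func main() {
	cfg := kubeletConfig{
		Kind:                     "KubeletConfiguration",
		APIVersion:               "kubelet.config.k8s.io/v1beta1",
		ContainerRuntimeEndpoint: "unix:///var/run/crio/crio.sock", // placeholder endpoint
		VolumePluginDir:          "/etc/kubernetes/kubelet-plugins/volume/exec",
		RegisterWithTaints: []taint{
			{Key: "node-role.kubernetes.io/master", Effect: "NoSchedule"}, // placeholder taint
		},
		SystemReserved: map[string]string{"cpu": "500m", "memory": "1Gi"},
		EvictionHard:   map[string]string{"memory.available": "100Mi"},
	}
	out, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}

JSON being valid YAML, the emitted document can be handed to --config directly; dropping the corresponding flags is what silences these warnings.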
Jan 28 16:34:57 crc kubenswrapper[4877]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Jan 28 16:34:57 crc kubenswrapper[4877]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.019874 4877 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.025056-16:34:57.025447 4877 feature_gate.go:330] unrecognized feature gate: (one warning per gate; 67 gates, in original order) ImageStreamImportMode, BareMetalLoadBalancer, PlatformOperators, AzureWorkloadIdentity, InsightsRuntimeExtractor, MachineAPIProviderOpenStack, OpenShiftPodSecurityAdmission, MetricsCollectionProfiles, OnClusterBuild, PinnedImages, ClusterAPIInstall, ChunkSizeMiB, GCPClusterHostedDNS, ConsolePluginContentSecurityPolicy, DNSNameResolver, IngressControllerLBSubnetsAWS, MultiArchInstallAzure, ManagedBootImages, InsightsConfig, OVNObservability, MachineAPIOperatorDisableMachineHealthCheckController, HardwareSpeed, NodeDisruptionPolicy, VSphereStaticIPs, MachineAPIMigration, MinimumKubeletVersion, CSIDriverSharedResource, AWSClusterHostedDNS, GCPLabelsTags, NetworkLiveMigration, AdditionalRoutingCapabilities, UpgradeStatus, IngressControllerDynamicConfigurationManager, PrivateHostedZoneAWS, BootcNodeManagement, ManagedBootImagesAWS, MultiArchInstallGCP, SigstoreImageVerification, AutomatedEtcdBackup, GatewayAPI, NutanixMultiSubnets, EtcdBackendQuota, NewOLM, VSphereMultiNetworks, NetworkSegmentation, Example, AWSEFSDriverVolumeMetrics, VSphereMultiVCenters, SetEIPForNLBIngressController, PersistentIPsForVirtualization, SignatureStores, MachineConfigNodes, BuildCSIVolumes, MultiArchInstallAWS, InsightsOnDemandDataGather, NetworkDiagnosticsConfig, AdminNetworkPolicy, RouteAdvertisements, VolumeGroupSnapshot, VSphereControlPlaneMachineSet, ClusterMonitoringConfig, ClusterAPIInstallIBMCloud, InsightsConfigAPI, MixedCPUsAllocation, ExternalOIDC, VSphereDriverConfiguration, AlibabaPlatform
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.025097 4877 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.025214 4877 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.025240 4877 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.025399 4877 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
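The feature_gate.go:330 burst above is the upstream kubelet gate parser receiving OpenShift-specific gate names it does not know: it warns and continues rather than failing, and it separately flags requested gates that are already GA or deprecated. A minimal Go sketch of that warn-and-continue shape (the gate table below is a small illustrative subset and the messages are imitations, not the kubelet's real code):

    package main

    import "log"

    // known mirrors the idea of the upstream gate table; anything absent
    // is "unrecognized". Stages here are assumptions for illustration.
    var known = map[string]string{
        "ValidatingAdmissionPolicy":              "GA",
        "CloudDualStackNodeIPs":                  "GA",
        "DisableKubeletCloudCredentialProviders": "GA",
        "KMSv1":                                  "Deprecated",
        "NodeSwap":                               "Beta",
    }

    // set applies requested gate values, warning instead of failing on
    // unknown names -- the behavior visible in the log above.
    func set(requested map[string]bool) {
        for name, value := range requested {
            stage, ok := known[name]
            if !ok {
                log.Printf("W] unrecognized feature gate: %s", name)
                continue
            }
            switch stage {
            case "GA":
                log.Printf("W] Setting GA feature gate %s=%t. It will be removed in a future release.", name, value)
            case "Deprecated":
                log.Printf("W] Setting deprecated feature gate %s=%t. It will be removed in a future release.", name, value)
            }
            // ...store the value for later lookups...
        }
    }

    func main() {
        set(map[string]bool{"GatewayAPI": true, "KMSv1": true, "ValidatingAdmissionPolicy": true})
    }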
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.025615-16:34:57.026558 4877 flags.go:64] command-line flags as parsed, one FLAG entry per flag in the original (journald prefix elided below):
  FLAG: --address="0.0.0.0"
  FLAG: --allowed-unsafe-sysctls="[]"
  FLAG: --anonymous-auth="true"
  FLAG: --application-metrics-count-limit="100"
  FLAG: --authentication-token-webhook="false"
  FLAG: --authentication-token-webhook-cache-ttl="2m0s"
  FLAG: --authorization-mode="AlwaysAllow"
  FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
  FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
  FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
  FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
  FLAG: --cert-dir="/var/lib/kubelet/pki"
  FLAG: --cgroup-driver="cgroupfs"
  FLAG: --cgroup-root=""
  FLAG: --cgroups-per-qos="true"
  FLAG: --client-ca-file=""
  FLAG: --cloud-config=""
  FLAG: --cloud-provider=""
  FLAG: --cluster-dns="[]"
  FLAG: --cluster-domain=""
  FLAG: --config="/etc/kubernetes/kubelet.conf"
  FLAG: --config-dir=""
  FLAG: --container-hints="/etc/cadvisor/container_hints.json"
  FLAG: --container-log-max-files="5"
  FLAG: --container-log-max-size="10Mi"
  FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
  FLAG: --containerd="/run/containerd/containerd.sock"
  FLAG: --containerd-namespace="k8s.io"
  FLAG: --contention-profiling="false"
  FLAG: --cpu-cfs-quota="true"
  FLAG: --cpu-cfs-quota-period="100ms"
  FLAG: --cpu-manager-policy="none"
  FLAG: --cpu-manager-policy-options=""
  FLAG: --cpu-manager-reconcile-period="10s"
  FLAG: --enable-controller-attach-detach="true"
  FLAG: --enable-debugging-handlers="true"
  FLAG: --enable-load-reader="false"
  FLAG: --enable-server="true"
  FLAG: --enforce-node-allocatable="[pods]"
  FLAG: --event-burst="100"
  FLAG: --event-qps="50"
  FLAG: --event-storage-age-limit="default=0"
  FLAG: --event-storage-event-limit="default=0"
  FLAG: --eviction-hard=""
  FLAG: --eviction-max-pod-grace-period="0"
  FLAG: --eviction-minimum-reclaim=""
  FLAG: --eviction-pressure-transition-period="5m0s"
  FLAG: --eviction-soft=""
  FLAG: --eviction-soft-grace-period=""
  FLAG: --exit-on-lock-contention="false"
  FLAG: --experimental-allocatable-ignore-eviction="false"
  FLAG: --experimental-mounter-path=""
  FLAG: --fail-cgroupv1="false"
  FLAG: --fail-swap-on="true"
  FLAG: --feature-gates=""
  FLAG: --file-check-frequency="20s"
  FLAG: --global-housekeeping-interval="1m0s"
  FLAG: --hairpin-mode="promiscuous-bridge"
  FLAG: --healthz-bind-address="127.0.0.1"
  FLAG: --healthz-port="10248"
  FLAG: --help="false"
  FLAG: --hostname-override=""
  FLAG: --housekeeping-interval="10s"
  FLAG: --http-check-frequency="20s"
  FLAG: --image-credential-provider-bin-dir=""
  FLAG: --image-credential-provider-config=""
  FLAG: --image-gc-high-threshold="85"
  FLAG: --image-gc-low-threshold="80"
  FLAG: --image-service-endpoint=""
  FLAG: --kernel-memcg-notification="false"
  FLAG: --kube-api-burst="100"
  FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
  FLAG: --kube-api-qps="50"
  FLAG: --kube-reserved=""
  FLAG: --kube-reserved-cgroup=""
  FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
  FLAG: --kubelet-cgroups=""
  FLAG: --local-storage-capacity-isolation="true"
  FLAG: --lock-file=""
  FLAG: --log-cadvisor-usage="false"
  FLAG: --log-flush-frequency="5s"
  FLAG: --log-json-info-buffer-size="0"
  FLAG: --log-json-split-stream="false"
  FLAG: --log-text-info-buffer-size="0"
  FLAG: --log-text-split-stream="false"
  FLAG: --logging-format="text"
  FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
  FLAG: --make-iptables-util-chains="true"
  FLAG: --manifest-url=""
  FLAG: --manifest-url-header=""
  FLAG: --max-housekeeping-interval="15s"
  FLAG: --max-open-files="1000000"
  FLAG: --max-pods="110"
  FLAG: --maximum-dead-containers="-1"
  FLAG: --maximum-dead-containers-per-container="1"
  FLAG: --memory-manager-policy="None"
  FLAG: --minimum-container-ttl-duration="6m0s"
  FLAG: --minimum-image-ttl-duration="2m0s"
  FLAG: --node-ip="192.168.126.11"
  FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
  FLAG: --node-status-max-images="50"
  FLAG: --node-status-update-frequency="10s"
  FLAG: --oom-score-adj="-999"
  FLAG: --pod-cidr=""
  FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
  FLAG: --pod-manifest-path=""
  FLAG: --pod-max-pids="-1"
  FLAG: --pods-per-core="0"
  FLAG: --port="10250"
  FLAG: --protect-kernel-defaults="false"
  FLAG: --provider-id=""
  FLAG: --qos-reserved=""
  FLAG: --read-only-port="10255"
  FLAG: --register-node="true"
  FLAG: --register-schedulable="true"
  FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
  FLAG: --registry-burst="10"
  FLAG: --registry-qps="5"
  FLAG: --reserved-cpus=""
  FLAG: --reserved-memory=""
  FLAG: --resolv-conf="/etc/resolv.conf"
  FLAG: --root-dir="/var/lib/kubelet"
  FLAG: --rotate-certificates="false"
  FLAG: --rotate-server-certificates="false"
  FLAG: --runonce="false"
  FLAG: --runtime-cgroups="/system.slice/crio.service"
  FLAG: --runtime-request-timeout="2m0s"
  FLAG: --seccomp-default="false"
  FLAG: --serialize-image-pulls="true"
  FLAG: --storage-driver-buffer-duration="1m0s"
  FLAG: --storage-driver-db="cadvisor"
  FLAG: --storage-driver-host="localhost:8086"
  FLAG: --storage-driver-password="root"
  FLAG: --storage-driver-secure="false"
  FLAG: --storage-driver-table="stats"
  FLAG: --storage-driver-user="root"
  FLAG: --streaming-connection-idle-timeout="4h0m0s"
  FLAG: --sync-frequency="1m0s"
  FLAG: --system-cgroups=""
  FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
  FLAG: --system-reserved-cgroup=""
  FLAG: --tls-cert-file=""
  FLAG: --tls-cipher-suites="[]"
  FLAG: --tls-min-version=""
  FLAG: --tls-private-key-file=""
  FLAG: --topology-manager-policy="none"
  FLAG: --topology-manager-policy-options=""
  FLAG: --topology-manager-scope="container"
  FLAG: --v="2"
  FLAG: --version="false"
  FLAG: --vmodule=""
  FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
  FLAG: --volume-stats-agg-period="1m0s"
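The FLAG dump above is the kubelet logging every parsed command-line flag, defaults included, before the config file is applied. Kubernetes does this through its component-base flag helpers; the same effect using only Go's standard library flag package looks like this (a sketch with illustrative flag names, not the kubelet's code):

    package main

    import (
        "flag"
        "log"
    )

    func main() {
        // Two sample flags standing in for the kubelet's full set.
        port := flag.Int("port", 10250, "kubelet server port")
        readOnlyPort := flag.Int("read-only-port", 10255, "read-only port")
        flag.Parse()
        _, _ = port, readOnlyPort

        // Walk every registered flag and log it in the FLAG: --name="value"
        // shape seen above; VisitAll covers defaults as well as explicitly
        // set values, which is why even empty flags appear in the dump.
        flag.CommandLine.VisitAll(func(f *flag.Flag) {
            log.Printf("FLAG: --%s=%q", f.Name, f.Value.String())
        })
    }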
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.026686-16:34:57.027052 4877 feature_gate.go:330] unrecognized feature gate: (second pass; the same 67 gates listed above, warned once each in a different order)
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.026782 4877 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.026827 4877 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.026945 4877 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.026996 4877 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.027060 4877 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.038036 4877 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.038081 4877 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
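The feature_gate.go:386 summary is the resolved gate map after the warnings above, and the "Golang settings" entry reports GOGC, GOMAXPROCS, and GOTRACEBACK as empty strings, meaning the environment variables are unset and the Go runtime defaults apply. A small stdlib sketch of gathering those same values (assumed equivalent behavior, not the kubelet's exact code):

    package main

    import (
        "fmt"
        "os"
        "runtime"
    )

    func main() {
        // The env vars are read as-is; an empty string is what the log
        // shows when they are unset (Go defaults: GOGC=100,
        // GOTRACEBACK=single, GOMAXPROCS=NumCPU).
        fmt.Printf("GOGC=%q GOMAXPROCS=%q GOTRACEBACK=%q\n",
            os.Getenv("GOGC"), os.Getenv("GOMAXPROCS"), os.Getenv("GOTRACEBACK"))

        // The parallelism actually in force regardless of the env var:
        // GOMAXPROCS(0) queries without changing the setting.
        fmt.Println("effective GOMAXPROCS:", runtime.GOMAXPROCS(0))
    }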
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.038168-16:34:57.038545 4877 feature_gate.go:330] unrecognized feature gate: (third pass; the same 67 gates again, warned once each)
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.038210 4877 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.038349 4877 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.038399 4877 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.038530 4877 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.038554 4877 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
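The identical warning set recurs because the gate list is apparently re-parsed once per configuration pass before each feature-gates summary; a fourth and final pass follows below. When reading such logs it helps to collapse the repeats; a small hypothetical filter (log-analysis tooling, not kubelet code):

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
    )

    // Reads a kubelet log on stdin and prints each distinct unrecognized
    // gate once, with its occurrence count -- handy when the same parse
    // warnings repeat for every configuration pass.
    func main() {
        re := regexp.MustCompile(`unrecognized feature gate: (\S+)`)
        counts := map[string]int{}
        var order []string
        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // log lines can be long
        for sc.Scan() {
            for _, m := range re.FindAllStringSubmatch(sc.Text(), -1) {
                if counts[m[1]] == 0 {
                    order = append(order, m[1])
                }
                counts[m[1]]++
            }
        }
        for _, gate := range order {
            fmt.Printf("%3dx %s\n", counts[gate], gate)
        }
    }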
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.038698-16:34:57.039054 4877 feature_gate.go:330] unrecognized feature gate: (fourth pass; the same 67 gates again, warned once each)
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.038872 4877 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.039015 4877 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.039022 4877 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.039047 4877 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.039063 4877 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.040174 4877 server.go:940] "Client rotation is on, will bootstrap in background"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.044435 4877 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.044575 4877 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
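With the client certificate loaded, the entries that follow show the certificate manager reporting a rotation deadline (2025-12-25) well before the 2026-02-24 expiry; since that deadline has already passed at this boot (2026-01-28), rotation starts immediately, and the first CSR attempt fails with connection refused while the API server is still coming up. Upstream kubelet jitters the deadline inside the tail of the certificate's validity window; a sketch of that kind of policy (the 70-90% window and the notBefore date are assumptions for illustration, not the kubelet's exact algorithm):

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // rotationDeadline picks a random point 70-90% of the way through the
    // certificate's lifetime, mirroring the jittered deadline visible in
    // the log so that renewal happens well before expiry.
    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
        total := notAfter.Sub(notBefore)
        frac := 0.7 + 0.2*rand.Float64()
        return notBefore.Add(time.Duration(float64(total) * frac))
    }

    func main() {
        // Expiry taken from the log; issuance date is assumed (one-year cert).
        notBefore := time.Date(2025, 2, 24, 5, 52, 8, 0, time.UTC)
        notAfter := time.Date(2026, 2, 24, 5, 52, 8, 0, time.UTC)
        fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
    }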
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.046200 4877 server.go:997] "Starting client certificate rotation"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.046228 4877 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.046520 4877 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-25 08:48:52.010700687 +0000 UTC
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.046641 4877 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.092334 4877 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 28 16:34:57 crc kubenswrapper[4877]: E0128 16:34:57.096271 4877 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.129.56.34:6443: connect: connection refused" logger="UnhandledError"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.097655 4877 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.124966 4877 log.go:25] "Validated CRI v1 runtime API"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.169771 4877 log.go:25] "Validated CRI v1 image API"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.173467 4877 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.179430 4877 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-28-16-30-13-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.179528 4877 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:41 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}]
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.206536 4877 manager.go:217] Machine: {Timestamp:2026-01-28 16:34:57.203261982 +0000 UTC m=+0.761588910 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f BootID:0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6 Filesystems:[{Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:41 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:ac:96:42 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:ac:96:42 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:40:d5:bd Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:e6:39:81 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:bd:ea:5e Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:dd:3f:6b Speed:-1 Mtu:1496} {Name:eth10 MacAddress:56:ba:ec:2f:4e:46 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:8a:45:d9:9a:0d:b5 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.206873 4877 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.207086 4877 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.212755 4877 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.213037 4877 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.213083 4877 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.213345 4877 topology_manager.go:138] "Creating topology manager with none policy"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.213358 4877 container_manager_linux.go:303] "Creating device plugin manager"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.219332 4877 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.219690 4877 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.219987 4877 state_mem.go:36] "Initialized new in-memory state store"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.220520 4877 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.224973 4877 kubelet.go:418] "Attempting to sync node with API server"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.225003 4877 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.225074 4877 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.225090 4877 kubelet.go:324] "Adding apiserver pod source"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.225102 4877 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.229627 4877 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.34:6443: connect: connection refused
Jan 28 16:34:57 crc kubenswrapper[4877]: E0128 16:34:57.229714 4877 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.34:6443: connect: connection refused" logger="UnhandledError"
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.229797 4877 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.129.56.34:6443: connect: connection refused
Jan 28 16:34:57 crc kubenswrapper[4877]: E0128 16:34:57.230235 4877 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.129.56.34:6443: connect: connection refused" logger="UnhandledError"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.234134 4877 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.235437 4877 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.238290 4877 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.242266 4877 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.242320 4877 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.242331 4877 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.242342 4877 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.242369 4877 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.242381 4877 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.242391 4877 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.242408 4877 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.242423 4877 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.242441 4877 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.242459 4877 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.242492 4877 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.253838 4877 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.254743 4877 server.go:1280] "Started kubelet"
Jan 28 16:34:57 crc systemd[1]: Started Kubernetes Kubelet.
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.259854 4877 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.260063 4877 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.260768 4877 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.262872 4877 server.go:460] "Adding debug handlers to kubelet server"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.264638 4877 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.34:6443: connect: connection refused
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.265851 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.265892 4877 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.266298 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 16:51:18.251235278 +0000 UTC
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.266573 4877 volume_manager.go:287] "The desired_state_of_world populator starts"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.266589 4877 volume_manager.go:289] "Starting Kubelet Volume Manager"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.266678 4877 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Jan 28 16:34:57 crc kubenswrapper[4877]: E0128 16:34:57.266257 4877 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.129.56.34:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188ef2489af80579 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 16:34:57.254696313 +0000 UTC m=+0.813023231,LastTimestamp:2026-01-28 16:34:57.254696313 +0000 UTC m=+0.813023231,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.267918 4877 factory.go:55] Registering systemd factory
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.268012 4877 factory.go:221] Registration of the systemd container factory successfully
Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.268280 4877 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.34:6443: connect: connection refused
Jan 28 16:34:57 crc kubenswrapper[4877]: E0128 16:34:57.268342 4877 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.34:6443: connect: connection refused" logger="UnhandledError"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.268460 4877 factory.go:153] Registering CRI-O factory
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.268496 4877 factory.go:221] Registration of the crio container factory successfully
Jan 28 16:34:57 crc kubenswrapper[4877]: E0128 16:34:57.268500 4877 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.268581 4877 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.268607 4877 factory.go:103] Registering Raw factory
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.268625 4877 manager.go:1196] Started watching for new ooms in manager
Jan 28 16:34:57 crc kubenswrapper[4877]: E0128 16:34:57.268968 4877 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.34:6443: connect: connection refused" interval="200ms"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.269339 4877 manager.go:319] Starting recovery of all containers
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278470 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278563 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278580 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278595 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278611 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278626 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278641 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278656 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278673 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278692 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278713 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278729 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278744 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278765 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278778 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278836 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278854 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278874 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278890 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278913 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278934 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278949 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278964 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278981 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.278996 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.279014 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.279059 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.279079 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.279096 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.279110 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.279125 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.279140 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.288847 4877 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.288918 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.288938 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.288953 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.288966 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.288979 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.288991 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289005 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289045 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289058 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289075 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289087 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289099 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289115 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289129 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289141 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289156 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289178 4877 manager.go:324] Recovery completed Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289195 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289308 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289338 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289363 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289385 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289404 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289421 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289447 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289462 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289500 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289515 4877 reconstruct.go:130] "Volume is marked as uncertain and added 
into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289529 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289543 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289562 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289584 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289599 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289613 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289631 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289648 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289663 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289680 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289696 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289710 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289727 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289746 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289763 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289780 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289798 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289846 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289864 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289880 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289916 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289931 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289952 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289970 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.289988 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290005 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290021 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290039 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290056 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290077 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290094 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290111 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290130 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290144 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290164 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290179 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290196 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290210 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290272 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290289 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290306 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290323 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290339 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290355 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" 
volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290380 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290399 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290416 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290433 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290451 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290468 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290506 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290527 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290546 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290563 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290588 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290606 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290624 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290640 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290659 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290699 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290716 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290733 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290753 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290771 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290789 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290808 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" 
volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290829 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290847 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290864 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290886 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290904 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290921 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290936 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290953 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290967 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290978 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.290990 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291003 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291017 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291027 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291038 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291050 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291062 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291074 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291085 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291389 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291406 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291419 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" 
volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291431 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291443 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291456 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291468 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291500 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291512 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291523 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291534 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291545 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291558 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291570 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" 
volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291581 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291593 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291605 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291616 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291627 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291640 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291652 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291664 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291677 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291690 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291703 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291715 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291725 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291736 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291748 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291759 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291770 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291780 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291790 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291801 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291812 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291825 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291833 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291845 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291855 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291865 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291874 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291886 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291897 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291906 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291917 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291926 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291935 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" 
volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291945 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291955 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291967 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291977 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291987 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.291997 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.292009 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.292024 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.292036 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.292049 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.292061 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" 
volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.292072 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.292086 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.292099 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.292112 4877 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.292123 4877 reconstruct.go:97] "Volume reconstruction finished" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.292132 4877 reconciler.go:26] "Reconciler: start to sync state" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.303612 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.306376 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.306455 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.306471 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.308232 4877 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.308253 4877 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.308285 4877 state_mem.go:36] "Initialized new in-memory state store" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.327276 4877 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.329113 4877 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.329171 4877 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.329209 4877 kubelet.go:2335] "Starting kubelet main sync loop" Jan 28 16:34:57 crc kubenswrapper[4877]: E0128 16:34:57.329301 4877 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.329960 4877 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.34:6443: connect: connection refused Jan 28 16:34:57 crc kubenswrapper[4877]: E0128 16:34:57.330033 4877 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.34:6443: connect: connection refused" logger="UnhandledError" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.351601 4877 policy_none.go:49] "None policy: Start" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.352852 4877 memory_manager.go:170] "Starting memorymanager" policy="None" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.352895 4877 state_mem.go:35] "Initializing new in-memory state store" Jan 28 16:34:57 crc kubenswrapper[4877]: E0128 16:34:57.368677 4877 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.427901 4877 manager.go:334] "Starting Device Plugin manager" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.427969 4877 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.427984 4877 server.go:79] "Starting device plugin registration server" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.428436 4877 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.428448 4877 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.428975 4877 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.429059 4877 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.429068 4877 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.430200 4877 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.430288 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.431505 4877 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.431532 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.431541 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.431667 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.431812 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.431857 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.432509 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.432544 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.432584 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.432690 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.432778 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.432816 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.432826 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.433147 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.433198 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.433589 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.433617 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.433626 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.433802 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.433858 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.433879 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.435199 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.435275 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.435289 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.435429 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.435460 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.435497 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.435535 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.435603 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.435636 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.436470 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.437116 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.437258 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.439381 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.439425 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.439429 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.439489 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.440167 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.440196 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.440546 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.440582 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:57 crc kubenswrapper[4877]: E0128 16:34:57.441687 4877 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.441916 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.441942 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.441951 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:57 crc kubenswrapper[4877]: E0128 16:34:57.470593 4877 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.34:6443: connect: connection refused" interval="400ms" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.494618 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.494667 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.494693 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.494721 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.494742 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.494758 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " 
pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.494775 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.494849 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.494900 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.494941 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.494965 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.494984 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.495018 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.495037 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.495054 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:34:57 crc 
kubenswrapper[4877]: I0128 16:34:57.528888 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.531109 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.531156 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.531168 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.531204 4877 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 16:34:57 crc kubenswrapper[4877]: E0128 16:34:57.531952 4877 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.34:6443: connect: connection refused" node="crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.595955 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596017 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596045 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596073 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596101 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596127 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596157 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: 
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596182 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596209 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596208 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596241 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596247 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596305 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596338 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596305 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596392 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596372 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " 
pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596312 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596273 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596383 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596398 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596379 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596504 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596566 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596585 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596591 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596611 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596649 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596657 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.596756 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.733124 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.735137 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.735185 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.735197 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.735227 4877 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 16:34:57 crc kubenswrapper[4877]: E0128 16:34:57.735906 4877 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.34:6443: connect: connection refused" node="crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.773750 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.803630 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.812015 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.832390 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.844503 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-8ab7fa91039b5f81f3c41332ba79ae2ba12a91c19c7a051d483061d003c05673 WatchSource:0}: Error finding container 8ab7fa91039b5f81f3c41332ba79ae2ba12a91c19c7a051d483061d003c05673: Status 404 returned error can't find the container with id 8ab7fa91039b5f81f3c41332ba79ae2ba12a91c19c7a051d483061d003c05673 Jan 28 16:34:57 crc kubenswrapper[4877]: I0128 16:34:57.844882 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.862658 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-353ec808049fc9b3b61375928826825aed7494a0ffe8c423bb470a45975b7c7b WatchSource:0}: Error finding container 353ec808049fc9b3b61375928826825aed7494a0ffe8c423bb470a45975b7c7b: Status 404 returned error can't find the container with id 353ec808049fc9b3b61375928826825aed7494a0ffe8c423bb470a45975b7c7b Jan 28 16:34:57 crc kubenswrapper[4877]: E0128 16:34:57.872373 4877 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.34:6443: connect: connection refused" interval="800ms" Jan 28 16:34:57 crc kubenswrapper[4877]: W0128 16:34:57.873621 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-6faebe44cd24b5e9211510b8c2c8d5faa6094431905960286d7505f7438fbde4 WatchSource:0}: Error finding container 6faebe44cd24b5e9211510b8c2c8d5faa6094431905960286d7505f7438fbde4: Status 404 returned error can't find the container with id 6faebe44cd24b5e9211510b8c2c8d5faa6094431905960286d7505f7438fbde4 Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.136282 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.137676 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.137711 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.137721 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.137754 4877 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 16:34:58 crc kubenswrapper[4877]: E0128 16:34:58.138944 4877 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.34:6443: connect: connection refused" node="crc" Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.266299 4877 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.34:6443: connect: connection refused Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.266420 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 02:12:37.870215996 +0000 UTC Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.334072 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"6faebe44cd24b5e9211510b8c2c8d5faa6094431905960286d7505f7438fbde4"} Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.335020 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"353ec808049fc9b3b61375928826825aed7494a0ffe8c423bb470a45975b7c7b"} Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.337321 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"17f16cb1fea871abf31d55c386e42f4ba7d5284f99e0643a86c98b466e48ec77"} Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.338250 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"100b75bebd55ec3147bb8dfdfb3c74dbdb7b8af511605e3d76b3c097deadf5d6"} Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.339406 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"8ab7fa91039b5f81f3c41332ba79ae2ba12a91c19c7a051d483061d003c05673"} Jan 28 16:34:58 crc kubenswrapper[4877]: W0128 16:34:58.427576 4877 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.129.56.34:6443: connect: connection refused Jan 28 16:34:58 crc kubenswrapper[4877]: E0128 16:34:58.427709 4877 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.129.56.34:6443: connect: connection refused" logger="UnhandledError" Jan 28 16:34:58 crc kubenswrapper[4877]: W0128 16:34:58.559372 4877 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.34:6443: connect: connection refused Jan 28 16:34:58 crc kubenswrapper[4877]: E0128 16:34:58.559525 4877 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.34:6443: connect: connection refused" logger="UnhandledError" Jan 28 16:34:58 crc kubenswrapper[4877]: E0128 16:34:58.674084 4877 controller.go:145] "Failed 
to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.34:6443: connect: connection refused" interval="1.6s" Jan 28 16:34:58 crc kubenswrapper[4877]: E0128 16:34:58.780754 4877 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.129.56.34:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188ef2489af80579 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 16:34:57.254696313 +0000 UTC m=+0.813023231,LastTimestamp:2026-01-28 16:34:57.254696313 +0000 UTC m=+0.813023231,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 28 16:34:58 crc kubenswrapper[4877]: W0128 16:34:58.806294 4877 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.34:6443: connect: connection refused Jan 28 16:34:58 crc kubenswrapper[4877]: E0128 16:34:58.806995 4877 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.34:6443: connect: connection refused" logger="UnhandledError" Jan 28 16:34:58 crc kubenswrapper[4877]: W0128 16:34:58.900641 4877 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.34:6443: connect: connection refused Jan 28 16:34:58 crc kubenswrapper[4877]: E0128 16:34:58.900729 4877 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.34:6443: connect: connection refused" logger="UnhandledError" Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.939983 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.941970 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.942010 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.942022 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:58 crc kubenswrapper[4877]: I0128 16:34:58.942056 4877 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 16:34:58 crc kubenswrapper[4877]: E0128 16:34:58.942613 4877 kubelet_node_status.go:99] "Unable to register node with API server" err="Post 
\"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.34:6443: connect: connection refused" node="crc" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.179316 4877 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 28 16:34:59 crc kubenswrapper[4877]: E0128 16:34:59.181968 4877 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.129.56.34:6443: connect: connection refused" logger="UnhandledError" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.266145 4877 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.34:6443: connect: connection refused Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.267557 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 11:51:41.361951428 +0000 UTC Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.345848 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc"} Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.345908 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a"} Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.348470 4877 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4" exitCode=0 Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.348580 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.348609 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4"} Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.349840 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.349864 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.349872 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.350874 4877 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="17a0bf5e39e09c802a625664b38f63783e7ab4dc6f3a4ab98a17a5c55001bf74" exitCode=0 Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.350966 4877 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"17a0bf5e39e09c802a625664b38f63783e7ab4dc6f3a4ab98a17a5c55001bf74"} Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.351037 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.352226 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.352244 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.352256 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.352849 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.354156 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.354181 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.354198 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.355868 4877 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="bc02683c4b4816b09a9eb418fda2e33726ff56af4fb7dc2c830559feaf3a6d84" exitCode=0 Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.355927 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"bc02683c4b4816b09a9eb418fda2e33726ff56af4fb7dc2c830559feaf3a6d84"} Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.356010 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.356944 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.356962 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.356972 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.359143 4877 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047" exitCode=0 Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.359178 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047"} Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.359224 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" 
Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.360022 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.360048 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:34:59 crc kubenswrapper[4877]: I0128 16:34:59.360058 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.266445 4877 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.34:6443: connect: connection refused Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.267669 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 00:58:38.218802603 +0000 UTC Jan 28 16:35:00 crc kubenswrapper[4877]: E0128 16:35:00.275157 4877 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.34:6443: connect: connection refused" interval="3.2s" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.364348 4877 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="37a0399f6e62c385793232da3b37a8f787f2e1dbb26fbd385b55c30e20a8c07f" exitCode=0 Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.364435 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"37a0399f6e62c385793232da3b37a8f787f2e1dbb26fbd385b55c30e20a8c07f"} Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.364581 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.365549 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.365581 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.365592 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.366865 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"363e810ae2dd4b41a9ba40fde0270f216d292d9b1d4f31191304a846ac176245"} Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.366893 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.373755 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.373831 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.373847 4877 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.384640 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"6eebc783d401483d227acf6a4fa2854c410d9e195bc3cf1a968fb0eec540187d"} Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.384700 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"617e761209376efc1375de420ec95948c4d78d70a0b577357ad157c85d23cab0"} Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.384714 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"3d24a6a68dfc0269608134b70620f1fc2ed73ea509b8f0152948ec8961679bab"} Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.384828 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.385798 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.385823 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.385834 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.394858 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba"} Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.394940 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938"} Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.395051 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.396281 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.396323 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.396339 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.404354 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae"} Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.404387 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2"} Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.404396 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd"} Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.404405 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390"} Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.543349 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.544804 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.544850 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.544861 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:00 crc kubenswrapper[4877]: I0128 16:35:00.544890 4877 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 16:35:00 crc kubenswrapper[4877]: E0128 16:35:00.545277 4877 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.34:6443: connect: connection refused" node="crc" Jan 28 16:35:00 crc kubenswrapper[4877]: W0128 16:35:00.597374 4877 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.34:6443: connect: connection refused Jan 28 16:35:00 crc kubenswrapper[4877]: E0128 16:35:00.597456 4877 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.34:6443: connect: connection refused" logger="UnhandledError" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.268809 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 14:52:51.192156057 +0000 UTC Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.411838 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1"} Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.412022 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.413288 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:01 
crc kubenswrapper[4877]: I0128 16:35:01.413331 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.413350 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.414629 4877 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="502420ec6fa5e2564a8a08c4f8d9ed6ffc0cd0dbf1aac60a4cc075219b9e5a28" exitCode=0 Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.414652 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"502420ec6fa5e2564a8a08c4f8d9ed6ffc0cd0dbf1aac60a4cc075219b9e5a28"} Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.414772 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.414791 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.414816 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.414772 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.415040 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.416164 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.416190 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.416201 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.416207 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.416212 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.416219 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.416276 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.416308 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.416326 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.416846 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.416943 4877 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.417014 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.561395 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:35:01 crc kubenswrapper[4877]: I0128 16:35:01.592115 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.269257 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 09:11:18.456254786 +0000 UTC Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.423181 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"360cd59a83a45618daa042cda86ec261bbd3065748ae9f6a6207a5eff1b896e6"} Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.423309 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.423267 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"456daa5d52777cc4d3373b3d6a540879c49cab8179b3d976f22a38ab529fe7ec"} Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.423345 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6d01c6e8f334b7eb9b465bca94f19011489c9a5c9b1180110b3d88ab6e5a4c54"} Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.423427 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"03c82c3e6c52f08c4d2bf0308e9d1088a27023c2ec8ce0a0c7c163977c0b6b8e"} Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.423455 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4f9373257785e093cc79ea2af752b21c428cb5c15d85f41ad397a6f113404770"} Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.423352 4877 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.423312 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.423514 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.423309 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.424572 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.424624 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.424650 4877 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.424775 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.424807 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.424818 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.424932 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.424964 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.424977 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.425163 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.425269 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.425354 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.746704 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:35:02 crc kubenswrapper[4877]: I0128 16:35:02.984892 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.269556 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 13:59:53.798388946 +0000 UTC Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.324930 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.425528 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.425583 4877 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.425662 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.425802 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.427131 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.427166 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.427175 4877 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.427224 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.427279 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.427295 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.427354 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.427442 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.427468 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.532922 4877 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.746048 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.750185 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.750245 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.750269 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:03 crc kubenswrapper[4877]: I0128 16:35:03.750314 4877 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 16:35:04 crc kubenswrapper[4877]: I0128 16:35:04.269999 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 00:14:56.48924627 +0000 UTC Jan 28 16:35:04 crc kubenswrapper[4877]: I0128 16:35:04.429052 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:04 crc kubenswrapper[4877]: I0128 16:35:04.429718 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:04 crc kubenswrapper[4877]: I0128 16:35:04.430244 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:04 crc kubenswrapper[4877]: I0128 16:35:04.430276 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:04 crc kubenswrapper[4877]: I0128 16:35:04.430293 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:04 crc kubenswrapper[4877]: I0128 16:35:04.430451 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:04 crc kubenswrapper[4877]: I0128 16:35:04.430499 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:04 crc kubenswrapper[4877]: 
I0128 16:35:04.430558 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:05 crc kubenswrapper[4877]: I0128 16:35:05.270269 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 06:28:54.592289541 +0000 UTC Jan 28 16:35:05 crc kubenswrapper[4877]: I0128 16:35:05.747394 4877 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 16:35:05 crc kubenswrapper[4877]: I0128 16:35:05.747541 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 16:35:06 crc kubenswrapper[4877]: I0128 16:35:06.182336 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:35:06 crc kubenswrapper[4877]: I0128 16:35:06.183175 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:06 crc kubenswrapper[4877]: I0128 16:35:06.185306 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:06 crc kubenswrapper[4877]: I0128 16:35:06.185355 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:06 crc kubenswrapper[4877]: I0128 16:35:06.185365 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:06 crc kubenswrapper[4877]: I0128 16:35:06.270727 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 16:30:14.21621293 +0000 UTC Jan 28 16:35:06 crc kubenswrapper[4877]: I0128 16:35:06.299352 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 28 16:35:06 crc kubenswrapper[4877]: I0128 16:35:06.300001 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:06 crc kubenswrapper[4877]: I0128 16:35:06.301639 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:06 crc kubenswrapper[4877]: I0128 16:35:06.301685 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:06 crc kubenswrapper[4877]: I0128 16:35:06.301697 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:06 crc kubenswrapper[4877]: I0128 16:35:06.362334 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:35:06 crc kubenswrapper[4877]: I0128 16:35:06.434167 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:06 crc kubenswrapper[4877]: I0128 
16:35:06.435616 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:06 crc kubenswrapper[4877]: I0128 16:35:06.435904 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:06 crc kubenswrapper[4877]: I0128 16:35:06.436076 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:07 crc kubenswrapper[4877]: I0128 16:35:07.271635 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 05:29:34.648779369 +0000 UTC Jan 28 16:35:07 crc kubenswrapper[4877]: E0128 16:35:07.442548 4877 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 28 16:35:07 crc kubenswrapper[4877]: I0128 16:35:07.716980 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:35:07 crc kubenswrapper[4877]: I0128 16:35:07.717179 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:07 crc kubenswrapper[4877]: I0128 16:35:07.718458 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:07 crc kubenswrapper[4877]: I0128 16:35:07.718541 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:07 crc kubenswrapper[4877]: I0128 16:35:07.718563 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:07 crc kubenswrapper[4877]: I0128 16:35:07.722616 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:35:08 crc kubenswrapper[4877]: I0128 16:35:08.272322 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 14:50:07.607714664 +0000 UTC Jan 28 16:35:08 crc kubenswrapper[4877]: I0128 16:35:08.438315 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:08 crc kubenswrapper[4877]: I0128 16:35:08.439652 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:08 crc kubenswrapper[4877]: I0128 16:35:08.439682 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:08 crc kubenswrapper[4877]: I0128 16:35:08.439691 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:08 crc kubenswrapper[4877]: I0128 16:35:08.444456 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:35:09 crc kubenswrapper[4877]: I0128 16:35:09.272606 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 17:02:31.319244963 +0000 UTC Jan 28 16:35:09 crc kubenswrapper[4877]: I0128 16:35:09.441391 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 
16:35:09 crc kubenswrapper[4877]: I0128 16:35:09.442953 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:09 crc kubenswrapper[4877]: I0128 16:35:09.443014 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:09 crc kubenswrapper[4877]: I0128 16:35:09.443031 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:10 crc kubenswrapper[4877]: I0128 16:35:10.273696 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 19:38:47.315235388 +0000 UTC Jan 28 16:35:10 crc kubenswrapper[4877]: W0128 16:35:10.939923 4877 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout Jan 28 16:35:10 crc kubenswrapper[4877]: I0128 16:35:10.940051 4877 trace.go:236] Trace[665838507]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Jan-2026 16:35:00.937) (total time: 10002ms): Jan 28 16:35:10 crc kubenswrapper[4877]: Trace[665838507]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10002ms (16:35:10.939) Jan 28 16:35:10 crc kubenswrapper[4877]: Trace[665838507]: [10.002705371s] [10.002705371s] END Jan 28 16:35:10 crc kubenswrapper[4877]: E0128 16:35:10.940082 4877 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 28 16:35:11 crc kubenswrapper[4877]: W0128 16:35:11.201143 4877 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout Jan 28 16:35:11 crc kubenswrapper[4877]: I0128 16:35:11.201809 4877 trace.go:236] Trace[1670403064]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Jan-2026 16:35:01.199) (total time: 10002ms): Jan 28 16:35:11 crc kubenswrapper[4877]: Trace[1670403064]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (16:35:11.201) Jan 28 16:35:11 crc kubenswrapper[4877]: Trace[1670403064]: [10.002261601s] [10.002261601s] END Jan 28 16:35:11 crc kubenswrapper[4877]: E0128 16:35:11.201848 4877 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 28 16:35:11 crc kubenswrapper[4877]: I0128 16:35:11.266878 4877 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake 
timeout Jan 28 16:35:11 crc kubenswrapper[4877]: I0128 16:35:11.274335 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 21:53:02.151868496 +0000 UTC Jan 28 16:35:11 crc kubenswrapper[4877]: I0128 16:35:11.450140 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 28 16:35:11 crc kubenswrapper[4877]: I0128 16:35:11.452292 4877 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1" exitCode=255 Jan 28 16:35:11 crc kubenswrapper[4877]: I0128 16:35:11.452384 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1"} Jan 28 16:35:11 crc kubenswrapper[4877]: I0128 16:35:11.452745 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:11 crc kubenswrapper[4877]: I0128 16:35:11.454142 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:11 crc kubenswrapper[4877]: I0128 16:35:11.454236 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:11 crc kubenswrapper[4877]: I0128 16:35:11.454264 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:11 crc kubenswrapper[4877]: I0128 16:35:11.455607 4877 scope.go:117] "RemoveContainer" containerID="1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1" Jan 28 16:35:11 crc kubenswrapper[4877]: I0128 16:35:11.623692 4877 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 28 16:35:11 crc kubenswrapper[4877]: I0128 16:35:11.623777 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 28 16:35:11 crc kubenswrapper[4877]: I0128 16:35:11.627516 4877 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 28 16:35:11 crc kubenswrapper[4877]: I0128 16:35:11.627590 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 28 16:35:12 crc kubenswrapper[4877]: I0128 16:35:12.275327 4877 
certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 12:13:12.442765021 +0000 UTC Jan 28 16:35:12 crc kubenswrapper[4877]: I0128 16:35:12.458016 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 28 16:35:12 crc kubenswrapper[4877]: I0128 16:35:12.460058 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3"} Jan 28 16:35:12 crc kubenswrapper[4877]: I0128 16:35:12.460242 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:12 crc kubenswrapper[4877]: I0128 16:35:12.461240 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:12 crc kubenswrapper[4877]: I0128 16:35:12.461270 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:12 crc kubenswrapper[4877]: I0128 16:35:12.461282 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:13 crc kubenswrapper[4877]: I0128 16:35:13.275672 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 12:16:50.402589408 +0000 UTC Jan 28 16:35:14 crc kubenswrapper[4877]: I0128 16:35:14.276597 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 00:39:43.174788862 +0000 UTC Jan 28 16:35:15 crc kubenswrapper[4877]: I0128 16:35:15.276894 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 12:28:03.917550454 +0000 UTC Jan 28 16:35:15 crc kubenswrapper[4877]: I0128 16:35:15.446503 4877 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 28 16:35:15 crc kubenswrapper[4877]: I0128 16:35:15.748227 4877 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 16:35:15 crc kubenswrapper[4877]: I0128 16:35:15.748317 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.182753 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.183044 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:16 
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.184560 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.184647 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.184682 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.277276 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 12:08:20.240894502 +0000 UTC
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.344872 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.345294 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.353274 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.353607 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.353779 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.369101 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.370990 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.473409 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.473570 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.475730 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.475787 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.475804 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.475807 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.475860 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.475884 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.484289 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.563286 4877 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Jan 28 16:35:16 crc kubenswrapper[4877]: E0128 16:35:16.612922 4877 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.616854 4877 trace.go:236] Trace[125949921]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Jan-2026 16:35:01.744) (total time: 14872ms):
Jan 28 16:35:16 crc kubenswrapper[4877]: Trace[125949921]: ---"Objects listed" error:<nil> 14872ms (16:35:16.616)
Jan 28 16:35:16 crc kubenswrapper[4877]: Trace[125949921]: [14.872122601s] [14.872122601s] END
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.616902 4877 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.617035 4877 trace.go:236] Trace[1468743802]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Jan-2026 16:35:05.382) (total time: 11234ms):
Jan 28 16:35:16 crc kubenswrapper[4877]: Trace[1468743802]: ---"Objects listed" error:<nil> 11234ms (16:35:16.616)
Jan 28 16:35:16 crc kubenswrapper[4877]: Trace[1468743802]: [11.234606529s] [11.234606529s] END
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.617064 4877 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.617733 4877 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Jan 28 16:35:16 crc kubenswrapper[4877]: E0128 16:35:16.619896 4877 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Jan 28 16:35:16 crc kubenswrapper[4877]: I0128 16:35:16.651091 4877 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.236559 4877 apiserver.go:52] "Watching apiserver"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.247754 4877 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.248229 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h"]
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.248849 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.248947 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.249050 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.249144 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.249546 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.249629 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.249649 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.249836 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.249913 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.253085 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.253463 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.254057 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.256859 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.256861 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.257039 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.263413 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.263542 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.264220 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.268249 4877 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.277744 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 03:03:26.699603865 +0000 UTC
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.303979 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.316185 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.323174 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.323442 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.323682 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.323691 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.323782 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.323815 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.323836 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.323891 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.323920 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.323910 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.323947 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324050 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324094 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324196 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324248 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324244 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324307 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324379 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324409 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324436 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324462 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324506 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324534 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324571 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324590 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324595 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324651 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324699 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324726 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324758 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324801 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324812 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324825 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324848 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324871 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324923 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324932 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324946 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324974 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.324977 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325004 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325033 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325190 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325236 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325267 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325274 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325330 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325356 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325342 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325379 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325404 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325428 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325455 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325496 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325523 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325549 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325571 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325592 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325615 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod 
\"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325639 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325655 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325662 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325691 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325713 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325742 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325746 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325816 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325845 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325841 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325868 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325890 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325907 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325905 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325925 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325927 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.325970 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.326001 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:35:17.82593127 +0000 UTC m=+21.384258398 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326025 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326052 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326075 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326098 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: 
\"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326120 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326145 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326169 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326181 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326196 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326250 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326259 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326309 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326339 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326365 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326390 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326405 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326420 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326451 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326510 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326537 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326563 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326591 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326615 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326639 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326665 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326687 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: 
\"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326713 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326737 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326763 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326800 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326824 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326850 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326877 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326899 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326925 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326948 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326990 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327013 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327035 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327061 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327086 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327111 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327135 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327157 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327177 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327198 4877 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327219 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327240 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327261 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327282 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327301 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327322 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327345 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327366 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327385 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327408 4877 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327431 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327451 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.328992 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329028 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329056 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329082 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329106 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329130 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329156 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 28 16:35:17 crc 
kubenswrapper[4877]: I0128 16:35:17.329191 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329219 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329243 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329268 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329292 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329316 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329337 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329361 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329385 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329413 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod 
\"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329439 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329465 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329513 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329540 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329563 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329587 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329608 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329632 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329655 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329678 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" 
(UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329699 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329726 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329763 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329788 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329813 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329845 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329880 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329904 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329933 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329963 4877 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329992 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330014 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330035 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330057 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330115 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330139 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330162 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330191 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330214 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330242 4877 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330265 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330292 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330319 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330350 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330375 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330399 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330426 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330449 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330491 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 16:35:17 
crc kubenswrapper[4877]: I0128 16:35:17.330519 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330547 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330574 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330598 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330626 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330651 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330681 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330706 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330733 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330760 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod 
\"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330784 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330807 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330829 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330855 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330885 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330911 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330953 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330977 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331003 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331045 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") 
pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331071 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331097 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331123 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331155 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331181 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331207 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331229 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331293 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331327 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331360 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" 
(UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331392 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331415 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331451 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331501 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331528 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331562 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331591 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331618 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: 
\"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331644 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331669 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331700 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331828 4877 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331846 4877 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331862 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331877 4877 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331893 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331908 4877 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331923 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.333256 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326407 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.340944 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.340950 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326826 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327104 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327404 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.327530 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.328103 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.328439 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.328646 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.328940 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). 
InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329166 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329273 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329386 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329824 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.329809 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330142 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330330 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.330503 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331051 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331106 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331568 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.331851 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.332175 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.344003 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.332391 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.332887 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.333008 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.333077 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.333557 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.334029 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.334349 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.334760 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.335988 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.337385 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.337440 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.339508 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.339726 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.339971 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.340156 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.340170 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.340198 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.340735 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.340818 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.340768 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.341196 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.341232 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.341306 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.341420 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.341530 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.341725 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.341955 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.344419 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.342243 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.342579 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.342829 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.342896 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.341261 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.343211 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.343698 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.343944 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.326586 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.342693 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.344608 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.344733 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.344926 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.345183 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.345234 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.345260 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.345640 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.346129 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.346418 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.346454 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.346762 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.347251 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.347570 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.347638 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.347722 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.347839 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.353138 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.354753 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.354850 4877 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.356014 4877 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.359909 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.360568 4877 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.361128 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.361274 4877 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.361354 4877 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.361402 4877 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.361460 4877 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.361570 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.361615 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.361657 4877 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.361696 4877 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.361753 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.361799 4877 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.361840 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.361881 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.363968 4877 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.364016 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.364059 4877 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\""
Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.364597 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:17.864515373 +0000 UTC m=+21.422842321 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.364663 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:17.864643137 +0000 UTC m=+21.422970065 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.376263 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.376804 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.376881 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.376978 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.377566 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.377957 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.378243 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.378378 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.378574 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.378672 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.379496 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.382160 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.382824 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.382973 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.383078 4877 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.383600 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.384409 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.385584 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.386899 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.387950 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.388036 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.388057 4877 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.394829 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.395711 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.395978 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.396226 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.396320 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.396050 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.396578 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.396715 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:17.883369999 +0000 UTC m=+21.441696887 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.396830 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:17.896772154 +0000 UTC m=+21.455099292 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.397374 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.397701 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.397716 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.397734 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.397892 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.398497 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.398618 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.398933 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.399380 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.399828 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.401591 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.401748 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.401685 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.402048 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.402359 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.402438 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.402578 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.402709 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.402895 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.402995 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.403018 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.403107 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.403180 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.403579 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.405297 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.405522 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.405614 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.405652 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.405723 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.405764 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.405823 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.405845 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes"
Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.405901 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.406894 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.407833 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.408195 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.408694 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.408888 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.408901 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.409181 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.409236 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.409220 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.409564 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.410206 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.411201 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.411855 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.412015 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.412211 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.412262 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.412384 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.412403 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.412486 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.412919 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.412983 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.413445 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.413551 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.414067 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.414073 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.414104 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.414195 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.414198 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.414201 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.414411 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.414501 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.414547 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.414292 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.414837 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.415089 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.415375 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.416559 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.417068 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.418330 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.418882 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.419669 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.420225 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.420401 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.420881 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.421044 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.422787 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.423281 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.423307 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.423770 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.424420 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.424529 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.424575 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.424682 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.424698 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.424813 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.424972 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.425717 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.426320 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.428724 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.429146 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.429753 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.431613 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.432759 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.434239 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.434910 4877 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.435049 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.441111 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.442713 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.445015 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.445822 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.448290 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.450527 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.451527 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.451695 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.453323 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.455106 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.457043 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.457032 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.457809 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.458268 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.459962 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.460515 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.462289 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.463803 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.464848 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.464939 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465038 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465064 4877 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465078 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465095 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465110 4877 reconciler_common.go:293] "Volume detached for 
volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465141 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465161 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465172 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465184 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465196 4877 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465152 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465240 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465208 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465309 4877 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465368 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465357 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465390 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: 
\"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465401 4877 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465412 4877 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465453 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465490 4877 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465505 4877 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465516 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465526 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465537 4877 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465548 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465562 4877 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465578 4877 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465588 4877 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465598 4877 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465609 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465620 4877 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465631 4877 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465642 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465652 4877 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465662 4877 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465675 4877 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465685 4877 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465695 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465705 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465720 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465730 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465742 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: 
\"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465752 4877 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465762 4877 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465773 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465782 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465792 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465803 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465816 4877 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465826 4877 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465837 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465846 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465855 4877 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465867 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465877 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: 
\"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465887 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465896 4877 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465938 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465951 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465962 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465971 4877 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465982 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.465995 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466007 4877 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466017 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466030 4877 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466102 4877 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466150 4877 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466172 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466195 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466221 4877 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466243 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466279 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466299 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466318 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466339 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466362 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466384 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466405 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466429 4877 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466456 4877 reconciler_common.go:293] "Volume detached for 
volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466245 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466524 4877 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466553 4877 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466577 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466601 4877 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466625 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466643 4877 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466662 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466680 4877 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466698 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466716 4877 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466733 4877 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466758 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: 
\"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466776 4877 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466794 4877 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466852 4877 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466871 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466889 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466908 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.466928 4877 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467056 4877 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467078 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467096 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467114 4877 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467132 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467146 4877 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467151 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467191 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467209 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467227 4877 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467245 4877 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467261 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467283 4877 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467301 4877 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467323 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467343 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467363 4877 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467381 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 
16:35:17.467398 4877 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467425 4877 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467451 4877 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467469 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467515 4877 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467534 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467551 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467569 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467586 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467608 4877 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467626 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467644 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467661 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: 
I0128 16:35:17.467680 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467698 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467716 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467733 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467751 4877 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467769 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467789 4877 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467811 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467831 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467847 4877 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467864 4877 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467912 4877 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467930 4877 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467947 4877 
reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467965 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.467983 4877 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468000 4877 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468018 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468035 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468053 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468070 4877 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468086 4877 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468104 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468126 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468147 4877 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468168 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on 
node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468189 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468210 4877 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468227 4877 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468244 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468263 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468281 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468298 4877 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468315 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468332 4877 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468371 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468389 4877 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468407 4877 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468424 4877 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath 
\"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468442 4877 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468459 4877 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468500 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468518 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468535 4877 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.468593 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.469276 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.470058 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.470981 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.471518 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.471624 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.472907 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.474169 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.481781 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.490814 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.499681 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.578583 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.596105 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 16:35:17 crc kubenswrapper[4877]: W0128 16:35:17.597150 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-19e387d2e65f4a4e001573f298d5537805f95215965b8cec7f2f5a64e2f7c1f4 WatchSource:0}: Error finding container 19e387d2e65f4a4e001573f298d5537805f95215965b8cec7f2f5a64e2f7c1f4: Status 404 returned error can't find the container with id 19e387d2e65f4a4e001573f298d5537805f95215965b8cec7f2f5a64e2f7c1f4 Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.605588 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.874359 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.874674 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:35:18.874634821 +0000 UTC m=+22.432961849 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.874863 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.874914 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.875063 4877 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.875151 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:18.875130223 +0000 UTC m=+22.433457111 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.875262 4877 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.875334 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:18.875321038 +0000 UTC m=+22.433648066 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.975513 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:17 crc kubenswrapper[4877]: I0128 16:35:17.975583 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.975725 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.975747 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.975761 4877 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.975819 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:18.975803406 +0000 UTC m=+22.534130294 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.975845 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.975940 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.975966 4877 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 16:35:17 crc kubenswrapper[4877]: E0128 16:35:17.976149 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:18.976063602 +0000 UTC m=+22.534390520 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.278577 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 18:24:16.332028002 +0000 UTC Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.329558 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:18 crc kubenswrapper[4877]: E0128 16:35:18.329734 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.481500 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0"} Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.481563 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b"} Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.481582 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"87202ce32312b4bb31cdbeb98e016f3176d3b610ddccba7aa52069765732e835"} Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.483799 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"ef77e12a7878e716a5e0ee466f03c6545a0fa172afcb38839a095e6a5bb1294f"} Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.489237 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003"} Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.489387 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"19e387d2e65f4a4e001573f298d5537805f95215965b8cec7f2f5a64e2f7c1f4"} Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.502061 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:18Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.524002 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:18Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.539178 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:18Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.551463 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:18Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.563144 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:18Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.576769 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:18Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.590861 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:18Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.605414 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:18Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.618923 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:18Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.631453 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:18Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.644908 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:18Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.657849 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:18Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.673749 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:18Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.693380 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:18Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.885334 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.885457 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.885541 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:18 crc kubenswrapper[4877]: E0128 16:35:18.885614 4877 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 16:35:18 crc kubenswrapper[4877]: E0128 16:35:18.885634 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-28 16:35:20.885585995 +0000 UTC m=+24.443912893 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:35:18 crc kubenswrapper[4877]: E0128 16:35:18.885674 4877 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 16:35:18 crc kubenswrapper[4877]: E0128 16:35:18.885705 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:20.885685107 +0000 UTC m=+24.444011995 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 16:35:18 crc kubenswrapper[4877]: E0128 16:35:18.885726 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:20.885712888 +0000 UTC m=+24.444039936 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.986583 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:18 crc kubenswrapper[4877]: I0128 16:35:18.986642 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:18 crc kubenswrapper[4877]: E0128 16:35:18.986797 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 16:35:18 crc kubenswrapper[4877]: E0128 16:35:18.986817 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 16:35:18 crc kubenswrapper[4877]: E0128 16:35:18.986797 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 16:35:18 crc kubenswrapper[4877]: E0128 16:35:18.986849 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 16:35:18 crc kubenswrapper[4877]: E0128 16:35:18.986856 4877 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 16:35:18 crc kubenswrapper[4877]: E0128 16:35:18.986939 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:20.986920944 +0000 UTC m=+24.545247832 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 16:35:18 crc kubenswrapper[4877]: E0128 16:35:18.986859 4877 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 16:35:18 crc kubenswrapper[4877]: E0128 16:35:18.987307 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:20.987298953 +0000 UTC m=+24.545625841 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 16:35:19 crc kubenswrapper[4877]: I0128 16:35:19.278941 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 19:28:53.206297995 +0000 UTC Jan 28 16:35:19 crc kubenswrapper[4877]: I0128 16:35:19.330588 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:19 crc kubenswrapper[4877]: I0128 16:35:19.330650 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:19 crc kubenswrapper[4877]: E0128 16:35:19.330781 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:35:19 crc kubenswrapper[4877]: E0128 16:35:19.330914 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:35:19 crc kubenswrapper[4877]: I0128 16:35:19.335338 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 28 16:35:19 crc kubenswrapper[4877]: I0128 16:35:19.336122 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 28 16:35:19 crc kubenswrapper[4877]: I0128 16:35:19.337231 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 28 16:35:19 crc kubenswrapper[4877]: I0128 16:35:19.338116 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 28 16:35:19 crc kubenswrapper[4877]: I0128 16:35:19.339322 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 28 16:35:19 crc kubenswrapper[4877]: I0128 16:35:19.340425 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 28 16:35:19 crc kubenswrapper[4877]: I0128 16:35:19.341311 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 28 16:35:19 crc kubenswrapper[4877]: I0128 16:35:19.342057 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 28 16:35:20 crc kubenswrapper[4877]: I0128 16:35:20.279443 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 18:06:25.955063118 +0000 UTC Jan 28 16:35:20 crc kubenswrapper[4877]: I0128 16:35:20.330423 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:20 crc kubenswrapper[4877]: E0128 16:35:20.330634 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:35:20 crc kubenswrapper[4877]: I0128 16:35:20.901691 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:35:20 crc kubenswrapper[4877]: I0128 16:35:20.901810 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:20 crc kubenswrapper[4877]: E0128 16:35:20.901908 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:35:24.901874788 +0000 UTC m=+28.460201676 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:35:20 crc kubenswrapper[4877]: I0128 16:35:20.901967 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:20 crc kubenswrapper[4877]: E0128 16:35:20.902126 4877 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 16:35:20 crc kubenswrapper[4877]: E0128 16:35:20.902241 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:24.902217287 +0000 UTC m=+28.460544325 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 16:35:20 crc kubenswrapper[4877]: E0128 16:35:20.902127 4877 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 16:35:20 crc kubenswrapper[4877]: E0128 16:35:20.902300 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:24.902289039 +0000 UTC m=+28.460616137 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 16:35:20 crc kubenswrapper[4877]: I0128 16:35:20.953766 4877 csr.go:261] certificate signing request csr-t7tv7 is approved, waiting to be issued Jan 28 16:35:20 crc kubenswrapper[4877]: I0128 16:35:20.971934 4877 csr.go:257] certificate signing request csr-t7tv7 is issued Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.002666 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.002763 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:21 crc kubenswrapper[4877]: E0128 16:35:21.002919 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 16:35:21 crc kubenswrapper[4877]: E0128 16:35:21.002957 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 16:35:21 crc kubenswrapper[4877]: E0128 16:35:21.002962 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 16:35:21 crc kubenswrapper[4877]: E0128 16:35:21.003005 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 16:35:21 crc kubenswrapper[4877]: E0128 16:35:21.003026 4877 projected.go:194] Error preparing data for projected volume 
Jan 28 16:35:21 crc kubenswrapper[4877]: E0128 16:35:21.002969 4877 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 16:35:21 crc kubenswrapper[4877]: E0128 16:35:21.003099 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:25.003075014 +0000 UTC m=+28.561401902 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 16:35:21 crc kubenswrapper[4877]: E0128 16:35:21.003147 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:25.003124045 +0000 UTC m=+28.561450933 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.279731 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 20:57:08.644598614 +0000 UTC
Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.329537 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.329646 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:35:21 crc kubenswrapper[4877]: E0128 16:35:21.329705 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
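
Note that each certificate_manager.go line reports the same expiration (2026-02-24 05:53:03) but a different rotation deadline (2025-11-19, 2025-12-28, 2025-11-23): the deadline is re-jittered so that many kubelets holding similar certificates do not all rotate at the same instant. A sketch of that style of computation (the 70%-90% window and the issue time are assumptions for illustration):

    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    // rotationDeadline picks a random point late in the cert's lifetime, so a
    // fleet of kubelets does not rotate simultaneously. The 70%-90% window is
    // an assumed illustration of the jitter idea, not the exact constants.
    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
    	total := notAfter.Sub(notBefore)
    	fraction := 0.7 + 0.2*rand.Float64()
    	return notBefore.Add(time.Duration(float64(total) * fraction))
    }

    func main() {
    	expiry := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC) // from the log
    	issued := expiry.Add(-365 * 24 * time.Hour)             // assumed issue time
    	fmt.Println("rotation deadline:", rotationDeadline(issued, expiry))
    }
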
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:35:21 crc kubenswrapper[4877]: E0128 16:35:21.329843 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.397852 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-6xsrm"] Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.398305 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.398393 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-hbxsq"] Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.400444 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.405186 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/95a2e787-3c51-42f8-b6fc-46b7c39ed39d-mcd-auth-proxy-config\") pod \"machine-config-daemon-6xsrm\" (UID: \"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\") " pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.405288 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-multus-conf-dir\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.405346 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpkkj\" (UniqueName: \"kubernetes.io/projected/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-kube-api-access-fpkkj\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.405409 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-run-netns\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.405450 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-var-lib-cni-bin\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.405525 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: 
\"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-var-lib-cni-multus\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.405574 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-run-multus-certs\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.405670 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-multus-cni-dir\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.405709 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-run-k8s-cni-cncf-io\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.405749 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-cni-binary-copy\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.405789 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-multus-daemon-config\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.405830 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/95a2e787-3c51-42f8-b6fc-46b7c39ed39d-rootfs\") pod \"machine-config-daemon-6xsrm\" (UID: \"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\") " pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.405930 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/95a2e787-3c51-42f8-b6fc-46b7c39ed39d-proxy-tls\") pod \"machine-config-daemon-6xsrm\" (UID: \"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\") " pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.406033 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9hnr\" (UniqueName: \"kubernetes.io/projected/95a2e787-3c51-42f8-b6fc-46b7c39ed39d-kube-api-access-t9hnr\") pod \"machine-config-daemon-6xsrm\" (UID: \"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\") " pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.406074 4877 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-cnibin\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.406142 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-os-release\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.406200 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-var-lib-kubelet\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.406241 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-hostroot\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.406280 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-etc-kubernetes\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.406323 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-system-cni-dir\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.406377 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-multus-socket-dir-parent\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.407071 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.409453 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.410372 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.411853 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.412634 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-gf9xn"] Jan 28 16:35:21 crc kubenswrapper[4877]: 
I0128 16:35:21.412666 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.412922 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.413176 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.413394 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.413555 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-qn64p"] Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.413738 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5gw27"] Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.414208 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.414399 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-qn64p" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.414401 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.414903 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.415454 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.418037 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.418219 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.418262 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.418351 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.418408 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.418567 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.419872 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.420330 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.420526 4877 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.420749 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.420943 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.421156 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.434876 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.451614 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.470059 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.494525 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.498275 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837"} Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507371 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdz8t\" (UniqueName: \"kubernetes.io/projected/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-kube-api-access-gdz8t\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507416 4877 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-var-lib-kubelet\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507436 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-hostroot\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507490 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-etc-openvswitch\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507508 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-node-log\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507527 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/96167230-b465-4037-a8ac-23bec379d4ba-hosts-file\") pod \"node-resolver-qn64p\" (UID: \"96167230-b465-4037-a8ac-23bec379d4ba\") " pod="openshift-dns/node-resolver-qn64p" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507540 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-var-lib-kubelet\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507546 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-etc-kubernetes\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507604 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507625 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-hostroot\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507577 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-etc-kubernetes\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507641 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-env-overrides\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507756 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-system-cni-dir\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507809 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507858 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-os-release\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507890 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-system-cni-dir\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.507957 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovnkube-config\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508058 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovn-node-metrics-cert\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508131 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-cnibin\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508176 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-systemd-units\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508289 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-multus-cni-dir\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508353 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-run-k8s-cni-cncf-io\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508378 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-run-multus-certs\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508409 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-var-lib-openvswitch\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508431 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-cni-bin\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508446 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-run-multus-certs\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508447 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-run-k8s-cni-cncf-io\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508493 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/95a2e787-3c51-42f8-b6fc-46b7c39ed39d-rootfs\") pod \"machine-config-daemon-6xsrm\" (UID: \"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\") " pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508527 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: 
\"kubernetes.io/configmap/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-multus-daemon-config\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508548 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/95a2e787-3c51-42f8-b6fc-46b7c39ed39d-rootfs\") pod \"machine-config-daemon-6xsrm\" (UID: \"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\") " pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508558 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-cni-binary-copy\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508627 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508658 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-ovn\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508672 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-multus-cni-dir\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508764 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9hnr\" (UniqueName: \"kubernetes.io/projected/95a2e787-3c51-42f8-b6fc-46b7c39ed39d-kube-api-access-t9hnr\") pod \"machine-config-daemon-6xsrm\" (UID: \"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\") " pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508785 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-cnibin\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508822 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-os-release\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508841 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-systemd\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509066 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvg9l\" (UniqueName: \"kubernetes.io/projected/96167230-b465-4037-a8ac-23bec379d4ba-kube-api-access-xvg9l\") pod \"node-resolver-qn64p\" (UID: \"96167230-b465-4037-a8ac-23bec379d4ba\") " pod="openshift-dns/node-resolver-qn64p" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509092 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-multus-socket-dir-parent\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509189 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-kubelet\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509213 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-slash\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509283 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-run-ovn-kubernetes\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509307 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-run-netns\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509151 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-multus-socket-dir-parent\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.508872 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-cnibin\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509026 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: 
\"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-os-release\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509409 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-multus-daemon-config\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509433 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/95a2e787-3c51-42f8-b6fc-46b7c39ed39d-mcd-auth-proxy-config\") pod \"machine-config-daemon-6xsrm\" (UID: \"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\") " pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509456 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-multus-conf-dir\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509487 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpkkj\" (UniqueName: \"kubernetes.io/projected/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-kube-api-access-fpkkj\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509505 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zk6qf\" (UniqueName: \"kubernetes.io/projected/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-kube-api-access-zk6qf\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509525 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-openvswitch\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509551 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-multus-conf-dir\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509583 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-system-cni-dir\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509601 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: 
\"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-cni-netd\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509673 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-run-netns\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509691 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-var-lib-cni-bin\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509748 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-run-netns\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509782 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-var-lib-cni-multus\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509790 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-var-lib-cni-bin\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509801 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/95a2e787-3c51-42f8-b6fc-46b7c39ed39d-proxy-tls\") pod \"machine-config-daemon-6xsrm\" (UID: \"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\") " pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509823 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-host-var-lib-cni-multus\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509815 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-cni-binary-copy\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509874 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-log-socket\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.509892 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovnkube-script-lib\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.510364 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/95a2e787-3c51-42f8-b6fc-46b7c39ed39d-mcd-auth-proxy-config\") pod \"machine-config-daemon-6xsrm\" (UID: \"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\") " pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.510457 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-cni-binary-copy\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.512722 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.520143 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/95a2e787-3c51-42f8-b6fc-46b7c39ed39d-proxy-tls\") pod \"machine-config-daemon-6xsrm\" (UID: \"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\") " pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.529984 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpkkj\" (UniqueName: \"kubernetes.io/projected/2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a-kube-api-access-fpkkj\") pod \"multus-hbxsq\" (UID: \"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\") " pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.530797 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9hnr\" (UniqueName: \"kubernetes.io/projected/95a2e787-3c51-42f8-b6fc-46b7c39ed39d-kube-api-access-t9hnr\") pod \"machine-config-daemon-6xsrm\" (UID: \"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\") " pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.540928 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.564828 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.597061 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.610904 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-log-socket\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.610951 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovnkube-script-lib\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.610974 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-etc-openvswitch\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.610992 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: 
\"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-node-log\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611010 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdz8t\" (UniqueName: \"kubernetes.io/projected/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-kube-api-access-gdz8t\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611031 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/96167230-b465-4037-a8ac-23bec379d4ba-hosts-file\") pod \"node-resolver-qn64p\" (UID: \"96167230-b465-4037-a8ac-23bec379d4ba\") " pod="openshift-dns/node-resolver-qn64p" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611052 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611070 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-env-overrides\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611067 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-log-socket\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611141 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-etc-openvswitch\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611097 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611271 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-node-log\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611324 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: 
\"kubernetes.io/host-path/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-os-release\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611356 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovnkube-config\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611390 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovn-node-metrics-cert\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611441 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-cnibin\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611443 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-os-release\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611461 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-systemd-units\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611507 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-cnibin\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611463 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/96167230-b465-4037-a8ac-23bec379d4ba-hosts-file\") pod \"node-resolver-qn64p\" (UID: \"96167230-b465-4037-a8ac-23bec379d4ba\") " pod="openshift-dns/node-resolver-qn64p" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611522 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-var-lib-openvswitch\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611215 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611641 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-systemd-units\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611652 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-var-lib-openvswitch\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611642 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-cni-bin\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611790 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-cni-binary-copy\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611680 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-cni-bin\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611822 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611926 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-ovn\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611987 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-ovn\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.611992 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-env-overrides\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612086 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-systemd\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612116 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvg9l\" (UniqueName: \"kubernetes.io/projected/96167230-b465-4037-a8ac-23bec379d4ba-kube-api-access-xvg9l\") pod \"node-resolver-qn64p\" (UID: \"96167230-b465-4037-a8ac-23bec379d4ba\") " pod="openshift-dns/node-resolver-qn64p" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612120 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-systemd\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612147 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-kubelet\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612168 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-slash\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612187 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-run-ovn-kubernetes\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612227 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-kubelet\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612252 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zk6qf\" (UniqueName: \"kubernetes.io/projected/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-kube-api-access-zk6qf\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612269 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-run-ovn-kubernetes\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612279 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-run-netns\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612190 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovnkube-script-lib\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612297 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-slash\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612300 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-openvswitch\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612328 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-openvswitch\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612343 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-system-cni-dir\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612363 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-run-netns\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612369 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-cni-netd\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612393 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-system-cni-dir\") pod 
\"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612438 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-cni-netd\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612755 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-cni-binary-copy\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612842 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.612859 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.613356 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovnkube-config\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.615957 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovn-node-metrics-cert\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.637860 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvg9l\" (UniqueName: \"kubernetes.io/projected/96167230-b465-4037-a8ac-23bec379d4ba-kube-api-access-xvg9l\") pod \"node-resolver-qn64p\" (UID: \"96167230-b465-4037-a8ac-23bec379d4ba\") " pod="openshift-dns/node-resolver-qn64p" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.643136 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdz8t\" (UniqueName: \"kubernetes.io/projected/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-kube-api-access-gdz8t\") pod \"ovnkube-node-5gw27\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.646622 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zk6qf\" (UniqueName: \"kubernetes.io/projected/d5c6b6a7-d88e-419a-b28a-a4ae06d24576-kube-api-access-zk6qf\") pod 
\"multus-additional-cni-plugins-gf9xn\" (UID: \"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\") " pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.656582 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.705593 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.723428 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.732442 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-hbxsq" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.743754 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.748841 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-qn64p" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.748867 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: W0128 16:35:21.749672 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2a17664e_66c0_4a75_9ac1_50ac0f8f0c7a.slice/crio-3d987672b6b9158ac709cb490271c04145f296981b49763b9724b342642f2e07 WatchSource:0}: Error finding container 3d987672b6b9158ac709cb490271c04145f296981b49763b9724b342642f2e07: Status 404 returned error can't find the container with id 3d987672b6b9158ac709cb490271c04145f296981b49763b9724b342642f2e07 Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.759750 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.798428 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.829465 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.868178 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.885668 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.903113 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.930371 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.949645 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.962396 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.972926 4877 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-28 16:30:20 +0000 UTC, rotation deadline is 2026-11-24 18:45:15.483202303 +0000 UTC Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.973007 4877 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7202h9m53.510198571s for next certificate rotation Jan 28 16:35:21 crc kubenswrapper[4877]: I0128 16:35:21.977962 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.280024 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 07:43:39.390883973 +0000 UTC Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.329846 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:22 crc kubenswrapper[4877]: E0128 16:35:22.330023 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.504657 4877 generic.go:334] "Generic (PLEG): container finished" podID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerID="dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280" exitCode=0 Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.504729 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerDied","Data":"dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280"} Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.504760 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerStarted","Data":"7230247f1466a5475d643ecd972c2b5247a82a76f574c3c9e36a7230af1d6b1b"} Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.508104 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f"} Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.508171 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb"} Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.508183 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"c90a5e1ef0edc996f08602651eaaf91420326cdaecfb8d58d0798592bdc54ea4"} Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.510906 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-qn64p" event={"ID":"96167230-b465-4037-a8ac-23bec379d4ba","Type":"ContainerStarted","Data":"e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4"} Jan 28 16:35:22 crc 
kubenswrapper[4877]: I0128 16:35:22.510965 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-qn64p" event={"ID":"96167230-b465-4037-a8ac-23bec379d4ba","Type":"ContainerStarted","Data":"1ab3d9271f5cf689d44f29e589cb1bb6120f0e16e1a07c8257b1b3c9caa0419c"} Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.513200 4877 generic.go:334] "Generic (PLEG): container finished" podID="d5c6b6a7-d88e-419a-b28a-a4ae06d24576" containerID="8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1" exitCode=0 Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.513286 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" event={"ID":"d5c6b6a7-d88e-419a-b28a-a4ae06d24576","Type":"ContainerDied","Data":"8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1"} Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.513311 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" event={"ID":"d5c6b6a7-d88e-419a-b28a-a4ae06d24576","Type":"ContainerStarted","Data":"433cec17c34e7a9b6efe92d9012d085b5b944aec28782a9d5db71f49561da0b7"} Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.515857 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hbxsq" event={"ID":"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a","Type":"ContainerStarted","Data":"1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e"} Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.515878 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hbxsq" event={"ID":"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a","Type":"ContainerStarted","Data":"3d987672b6b9158ac709cb490271c04145f296981b49763b9724b342642f2e07"} Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.526144 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.547231 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.562167 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.582876 4877 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.603511 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.620751 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.636405 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.651054 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.702713 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.740701 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.750554 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.761105 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.762782 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.766211 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.778537 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.793603 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.806207 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea4
0311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.822245 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.836180 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.850906 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.876677 4877 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnku
be-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1
74f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453
265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.892457 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.905800 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.922876 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.936735 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.959258 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc 
kubenswrapper[4877]: I0128 16:35:22.976024 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:22 crc kubenswrapper[4877]: I0128 16:35:22.994356 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.020923 4877 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.023981 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.024037 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.024050 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.024319 4877 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.031594 4877 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.031903 4877 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.033173 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.033214 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.033226 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.033249 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.033263 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:23Z","lastTransitionTime":"2026-01-28T16:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:23 crc kubenswrapper[4877]: E0128 16:35:23.054713 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.064510 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.064573 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.064591 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.064613 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.064625 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:23Z","lastTransitionTime":"2026-01-28T16:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:23 crc kubenswrapper[4877]: E0128 16:35:23.076806 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.080992 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.081083 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
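These retries fail before the status patch is ever persisted: the node.network-node-identity.openshift.io admission webhook serves a certificate that expired on 2025-08-24, while the node clock reads 2026-01-28, so the Go TLS client rejects the handshake. Below is a minimal sketch of the validity-window check behind that x509 error, assuming the webhook's serving certificate has been copied to a local PEM file (the path is hypothetical); it illustrates the check, it is not kubelet or webhook code.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path to the webhook's serving certificate in PEM form.
	pemBytes, err := os.ReadFile("/tmp/network-node-identity-serving.crt")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// crypto/x509 rejects a chain whose verification time falls outside
	// [NotBefore, NotAfter]; that is what produces the error in the log.
	now := time.Now()
	if now.After(cert.NotAfter) {
		fmt.Printf("x509: certificate has expired or is not yet valid: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	} else if now.Before(cert.NotBefore) {
		fmt.Printf("certificate not yet valid before %s\n", cert.NotBefore.UTC().Format(time.RFC3339))
	} else {
		fmt.Println("certificate is within its validity window")
	}
}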
event="NodeHasNoDiskPressure" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.081107 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.081144 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.081174 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:23Z","lastTransitionTime":"2026-01-28T16:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:23 crc kubenswrapper[4877]: E0128 16:35:23.095759 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.099906 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.099979 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.100000 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.100028 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.100048 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:23Z","lastTransitionTime":"2026-01-28T16:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:23 crc kubenswrapper[4877]: E0128 16:35:23.113659 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.118743 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.118821 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
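Each failed retry also re-reports the same Ready=False condition: NetworkReady stays false because no CNI configuration file exists yet in /etc/kubernetes/cni/net.d/ (ovnkube-node is still starting, as the PLEG events further down show). A rough sketch of that readiness test follows, under the assumption that readiness simply means at least one .conf/.conflist/.json file in the conf directory; it mirrors the spirit of ocicni-style loaders, not the actual CRI-O source.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Directory named in the log message.
	confDir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	found := false
	for _, e := range entries {
		// Assumption: any .conf, .conflist or .json file counts as a
		// usable network configuration.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("found CNI config:", e.Name())
			found = true
		}
	}
	if !found {
		fmt.Println("no CNI configuration file in", confDir, "- NetworkReady=false")
	}
}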
event="NodeHasNoDiskPressure" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.118841 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.118867 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.118889 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:23Z","lastTransitionTime":"2026-01-28T16:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:23 crc kubenswrapper[4877]: E0128 16:35:23.138548 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:23 crc kubenswrapper[4877]: E0128 16:35:23.138678 4877 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.140770 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
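The E0128 16:35:23.138678 entry above marks the point where the kubelet gives up for this sync iteration: node status updates are attempted a fixed number of times before "update node status exceeds retry count" is surfaced (the upstream kubelet bounds this with a small constant, nodeStatusUpdateRetry = 5 in current sources, which matches the run of retries in this log). The sketch below reproduces that retry-then-give-up shape with the failing PATCH stubbed out; it is an illustration, not the kubelet's actual code.

package main

import (
	"errors"
	"fmt"
)

// The upstream kubelet retries node status updates a fixed number of
// times per sync iteration before surfacing a terminal error.
const nodeStatusUpdateRetry = 5

// Stand-in for the status PATCH that the admission webhook keeps rejecting.
func tryUpdateNodeStatus() error {
	return errors.New("failed calling webhook \"node.network-node-identity.openshift.io\": tls: failed to verify certificate")
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdateNodeStatus(); err != nil {
			fmt.Println("Error updating node status, will retry:", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}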
event="NodeHasSufficientMemory" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.140800 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.140811 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.140834 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.140850 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:23Z","lastTransitionTime":"2026-01-28T16:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.243830 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.243871 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.243885 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.243906 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.243921 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:23Z","lastTransitionTime":"2026-01-28T16:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.280903 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 17:44:53.277166776 +0000 UTC Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.330339 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:23 crc kubenswrapper[4877]: E0128 16:35:23.330572 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.331134 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:23 crc kubenswrapper[4877]: E0128 16:35:23.331254 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.346526 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.346579 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.346597 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.346624 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.346644 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:23Z","lastTransitionTime":"2026-01-28T16:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.449292 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.449800 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.449815 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.449839 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.449856 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:23Z","lastTransitionTime":"2026-01-28T16:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.522521 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerStarted","Data":"741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d"}
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.522582 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerStarted","Data":"99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545"}
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.522597 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerStarted","Data":"b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa"}
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.524385 4877 generic.go:334] "Generic (PLEG): container finished" podID="d5c6b6a7-d88e-419a-b28a-a4ae06d24576" containerID="f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e" exitCode=0
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.524469 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" event={"ID":"d5c6b6a7-d88e-419a-b28a-a4ae06d24576","Type":"ContainerDied","Data":"f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e"}
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.540632 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.552713 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.552769 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.552780 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.552801 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.552814 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:23Z","lastTransitionTime":"2026-01-28T16:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.560356 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.577991 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.591141 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z"
"Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnku
be-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1
74f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453
265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.638561 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.638561 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.656092 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.656394 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.656431 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.656446 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.656465 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.656497 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:23Z","lastTransitionTime":"2026-01-28T16:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.668706 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.683985 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.697992 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.715126 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.728370 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.743360 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:23Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.760758 4877 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.760796 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.760806 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.760821 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.760830 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:23Z","lastTransitionTime":"2026-01-28T16:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.862632 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.862661 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.862670 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.862683 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.862691 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:23Z","lastTransitionTime":"2026-01-28T16:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.966918 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.966964 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.966975 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.966995 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:23 crc kubenswrapper[4877]: I0128 16:35:23.967008 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:23Z","lastTransitionTime":"2026-01-28T16:35:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.070061 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.070540 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.070561 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.070586 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.070605 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:24Z","lastTransitionTime":"2026-01-28T16:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.104970 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-x2fwz"] Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.105564 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-x2fwz" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.108368 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.108657 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.108691 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.109773 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.133538 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.141511 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3568823b-ecd1-4e61-a47f-fda701dd8796-serviceca\") pod \"node-ca-x2fwz\" (UID: \"3568823b-ecd1-4e61-a47f-fda701dd8796\") " pod="openshift-image-registry/node-ca-x2fwz" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.141558 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3568823b-ecd1-4e61-a47f-fda701dd8796-host\") pod \"node-ca-x2fwz\" (UID: \"3568823b-ecd1-4e61-a47f-fda701dd8796\") " pod="openshift-image-registry/node-ca-x2fwz" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.141596 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mn77b\" (UniqueName: \"kubernetes.io/projected/3568823b-ecd1-4e61-a47f-fda701dd8796-kube-api-access-mn77b\") pod \"node-ca-x2fwz\" (UID: \"3568823b-ecd1-4e61-a47f-fda701dd8796\") " pod="openshift-image-registry/node-ca-x2fwz" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.152721 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.167851 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.172803 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.172861 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.172881 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.172907 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.172927 4877 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:24Z","lastTransitionTime":"2026-01-28T16:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.183009 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.200240 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.217102 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.228973 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.242329 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3568823b-ecd1-4e61-a47f-fda701dd8796-host\") pod \"node-ca-x2fwz\" (UID: \"3568823b-ecd1-4e61-a47f-fda701dd8796\") " pod="openshift-image-registry/node-ca-x2fwz" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.242393 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mn77b\" (UniqueName: \"kubernetes.io/projected/3568823b-ecd1-4e61-a47f-fda701dd8796-kube-api-access-mn77b\") pod \"node-ca-x2fwz\" (UID: \"3568823b-ecd1-4e61-a47f-fda701dd8796\") " pod="openshift-image-registry/node-ca-x2fwz" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.242434 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3568823b-ecd1-4e61-a47f-fda701dd8796-serviceca\") pod \"node-ca-x2fwz\" (UID: \"3568823b-ecd1-4e61-a47f-fda701dd8796\") " 
pod="openshift-image-registry/node-ca-x2fwz" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.242561 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3568823b-ecd1-4e61-a47f-fda701dd8796-host\") pod \"node-ca-x2fwz\" (UID: \"3568823b-ecd1-4e61-a47f-fda701dd8796\") " pod="openshift-image-registry/node-ca-x2fwz" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.243435 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.243666 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3568823b-ecd1-4e61-a47f-fda701dd8796-serviceca\") pod \"node-ca-x2fwz\" (UID: \"3568823b-ecd1-4e61-a47f-fda701dd8796\") " pod="openshift-image-registry/node-ca-x2fwz" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.259153 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.261613 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mn77b\" (UniqueName: \"kubernetes.io/projected/3568823b-ecd1-4e61-a47f-fda701dd8796-kube-api-access-mn77b\") pod \"node-ca-x2fwz\" (UID: \"3568823b-ecd1-4e61-a47f-fda701dd8796\") " pod="openshift-image-registry/node-ca-x2fwz" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.275579 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.275625 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.275636 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.275652 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.275664 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:24Z","lastTransitionTime":"2026-01-28T16:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.281742 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 05:36:14.179515647 +0000 UTC Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.282792 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.296981 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.312280 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.326500 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.329844 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:24 crc kubenswrapper[4877]: E0128 16:35:24.330030 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.341304 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.379067 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.379138 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.379157 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.379181 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.379201 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:24Z","lastTransitionTime":"2026-01-28T16:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.421559 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-x2fwz" Jan 28 16:35:24 crc kubenswrapper[4877]: W0128 16:35:24.433561 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3568823b_ecd1_4e61_a47f_fda701dd8796.slice/crio-2274511de94dace0cdaf9e35b915354d67dca9183a4f5a88f848e4c57540da7c WatchSource:0}: Error finding container 2274511de94dace0cdaf9e35b915354d67dca9183a4f5a88f848e4c57540da7c: Status 404 returned error can't find the container with id 2274511de94dace0cdaf9e35b915354d67dca9183a4f5a88f848e4c57540da7c Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.482074 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.482122 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.482133 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.482153 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.482164 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:24Z","lastTransitionTime":"2026-01-28T16:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.533275 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-x2fwz" event={"ID":"3568823b-ecd1-4e61-a47f-fda701dd8796","Type":"ContainerStarted","Data":"2274511de94dace0cdaf9e35b915354d67dca9183a4f5a88f848e4c57540da7c"} Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.539655 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerStarted","Data":"d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f"} Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.539711 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerStarted","Data":"41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0"} Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.539722 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerStarted","Data":"08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a"} Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.543915 4877 generic.go:334] "Generic (PLEG): container finished" podID="d5c6b6a7-d88e-419a-b28a-a4ae06d24576" containerID="d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231" exitCode=0 Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.543963 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" event={"ID":"d5c6b6a7-d88e-419a-b28a-a4ae06d24576","Type":"ContainerDied","Data":"d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231"} Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.563075 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.581438 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.588668 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.588735 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.588750 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.588769 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.588782 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:24Z","lastTransitionTime":"2026-01-28T16:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.602213 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.617538 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.631461 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.656246 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.680671 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-ap
iserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.700222 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.714245 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.714290 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.714299 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.714317 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.714331 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:24Z","lastTransitionTime":"2026-01-28T16:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.722107 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},
{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.739661 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.754302 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.771854 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.784691 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.798536 4877 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5
cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.816784 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.816833 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.816848 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.816872 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.816901 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:24Z","lastTransitionTime":"2026-01-28T16:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.919201 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.919249 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.919262 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.919280 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.919292 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:24Z","lastTransitionTime":"2026-01-28T16:35:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.949285 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:35:24 crc kubenswrapper[4877]: E0128 16:35:24.949514 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:35:32.949453521 +0000 UTC m=+36.507780409 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.949602 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 16:35:24 crc kubenswrapper[4877]: I0128 16:35:24.949674 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 16:35:24 crc kubenswrapper[4877]: E0128 16:35:24.949795 4877 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 28 16:35:24 crc kubenswrapper[4877]: E0128 16:35:24.949838 4877 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 28 16:35:24 crc kubenswrapper[4877]: E0128 16:35:24.949859 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:32.949851042 +0000 UTC m=+36.508177930 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 28 16:35:24 crc kubenswrapper[4877]: E0128 16:35:24.949939 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:32.949915044 +0000 UTC m=+36.508241932 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
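[Annotation] The nestedpendingoperations failures above all carry "No retries permitted until ... (durationBeforeRetry 8s)": the kubelet's volume manager retries a failing mount/unmount with exponential backoff rather than hot-looping. The 8s here is consistent with a 500ms initial delay doubled on each consecutive failure (0.5s, 1s, 2s, 4s, 8s). A minimal sketch of that rule, assuming the upstream defaults (500ms base, factor 2, cap of about 2m2s) — a toy illustration, not kubelet's actual code:

package main

import (
	"fmt"
	"time"
)

// durationBeforeRetry approximates the backoff the kubelet applies to a
// repeatedly failing volume operation: start small, double on every
// consecutive error, stop growing at a cap. The constants are assumed
// upstream defaults, not values read from this log.
func durationBeforeRetry(consecutiveErrors int) time.Duration {
	const (
		initial = 500 * time.Millisecond
		maxWait = 2*time.Minute + 2*time.Second
	)
	d := initial
	for i := 1; i < consecutiveErrors; i++ {
		d *= 2
		if d >= maxWait {
			return maxWait
		}
	}
	return d
}

func main() {
	// The fifth consecutive failure lands on the 8s seen above.
	for n := 1; n <= 6; n++ {
		fmt.Printf("failure #%d -> retry in %v\n", n, durationBeforeRetry(n))
	}
}

Under this rule the pod seen unmounting above would be retried at 16:35:32, exactly 8s after the 16:35:24 failure, matching the "No retries permitted until" timestamps.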
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.022449 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.022552 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.022567 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.022592 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.022616 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:25Z","lastTransitionTime":"2026-01-28T16:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.050581 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.051069 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:35:25 crc kubenswrapper[4877]: E0128 16:35:25.050905 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 16:35:25 crc kubenswrapper[4877]: E0128 16:35:25.051328 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 16:35:25 crc kubenswrapper[4877]: E0128 16:35:25.051349 4877 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 16:35:25 crc kubenswrapper[4877]: E0128 16:35:25.051458 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:33.051431958 +0000 UTC m=+36.609758866 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 16:35:25 crc kubenswrapper[4877]: E0128 16:35:25.051292 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 16:35:25 crc kubenswrapper[4877]: E0128 16:35:25.051994 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 16:35:25 crc kubenswrapper[4877]: E0128 16:35:25.052009 4877 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 16:35:25 crc kubenswrapper[4877]: E0128 16:35:25.052047 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:33.052036544 +0000 UTC m=+36.610363452 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.125067 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.125119 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.125135 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.125158 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.125171 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:25Z","lastTransitionTime":"2026-01-28T16:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
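[Annotation] The kube-api-access-* volumes failing above are projected volumes: a bundle of the pod's bound service-account token, the kube-root-ca.crt ConfigMap, and the namespace via the downward API (on OpenShift, an openshift-service-ca.crt projection as well, which is why both objects appear in the paired errors). Any one source being unavailable ("not registered" here, because the kubelet's object caches have not synced those ConfigMaps yet) blocks the whole mount. A sketch of that volume shape using the k8s.io/api/core/v1 types; the volume name and token lifetime are taken from the log and common defaults, and the snippet only constructs the spec for illustration:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	expiry := int64(3607) // typical kubelet-issued token lifetime; illustrative
	vol := corev1.Volume{
		Name: "kube-api-access-cqllr",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					// Bound service-account token for the pod.
					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
						Path:              "token",
						ExpirationSeconds: &expiry,
					}},
					// Cluster CA bundle; this is the "kube-root-ca.crt"
					// object the errors above report as not registered.
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
					}},
					// Pod namespace via the downward API.
					{DownwardAPI: &corev1.DownwardAPIProjection{
						Items: []corev1.DownwardAPIVolumeFile{{
							Path:     "namespace",
							FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"},
						}},
					}},
				},
			},
		},
	}
	fmt.Printf("%+v\n", vol)
}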
Has your network provider started?"}
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.228163 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.228223 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.228240 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.228265 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.228282 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:25Z","lastTransitionTime":"2026-01-28T16:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.282651 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 15:20:42.41438658 +0000 UTC
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.329502 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.329517 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:35:25 crc kubenswrapper[4877]: E0128 16:35:25.330005 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 16:35:25 crc kubenswrapper[4877]: E0128 16:35:25.330098 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
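[Annotation] Both "Error syncing pod, skipping" lines above, and the NodeNotReady condition repeated throughout this section, come down to one check: the runtime only reports NetworkReady=true once a CNI configuration file exists in the conf dir (/etc/kubernetes/cni/net.d/ on this node), and that directory stays empty until the network plugin (multus/OVN-Kubernetes here) writes its config. A stdlib-only sketch that approximates the readiness test behind the message — an illustration of the idea, not the actual CRI/CNI code:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// networkReady reports whether at least one CNI config file
// (.conf/.conflist/.json) is present in the configuration directory,
// which is roughly the condition behind "no CNI configuration file in ...".
func networkReady(confDir string) (bool, error) {
	entries, err := os.ReadDir(confDir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := networkReady("/etc/kubernetes/cni/net.d")
	fmt.Println(ok, err)
}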
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.331677 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.331741 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.331762 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.331789 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.331809 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:25Z","lastTransitionTime":"2026-01-28T16:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.434736 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.434782 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.434791 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.434805 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.434815 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:25Z","lastTransitionTime":"2026-01-28T16:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.537869 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.538151 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.538261 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.538378 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.538502 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:25Z","lastTransitionTime":"2026-01-28T16:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.550326 4877 generic.go:334] "Generic (PLEG): container finished" podID="d5c6b6a7-d88e-419a-b28a-a4ae06d24576" containerID="cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703" exitCode=0 Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.550653 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" event={"ID":"d5c6b6a7-d88e-419a-b28a-a4ae06d24576","Type":"ContainerDied","Data":"cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703"} Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.552536 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-x2fwz" event={"ID":"3568823b-ecd1-4e61-a47f-fda701dd8796","Type":"ContainerStarted","Data":"5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81"} Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.577042 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod 
was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.596106 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"
},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.609980 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.624444 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@s
ha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.643911 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.643949 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.643958 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.643978 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.643988 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:25Z","lastTransitionTime":"2026-01-28T16:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.648537 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run
/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.674101 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.690659 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.701917 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.714861 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.728266 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.739700 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.747246 4877 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.747330 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.747350 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.747378 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.747428 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:25Z","lastTransitionTime":"2026-01-28T16:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.762840 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z 
is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.776968 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.797576 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"w
aiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.814339 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.826180 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.850599 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.850951 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.851045 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.850881 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.851199 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.851438 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:25Z","lastTransitionTime":"2026-01-28T16:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.867999 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.884358 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.897888 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.913170 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.928536 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.945571 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.953906 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.953946 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.953959 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.953977 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.953989 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:25Z","lastTransitionTime":"2026-01-28T16:35:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.962996 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.977369 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:25 crc kubenswrapper[4877]: I0128 16:35:25.990020 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:25Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.008196 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.022578 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea4
0311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.056697 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.056911 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.057146 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.057405 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.057532 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:26Z","lastTransitionTime":"2026-01-28T16:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.161620 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.161697 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.161718 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.161742 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.161758 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:26Z","lastTransitionTime":"2026-01-28T16:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.189957 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.206024 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.221130 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.233999 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.249302 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.260102 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.264532 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.264594 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.264608 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.264629 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.264646 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:26Z","lastTransitionTime":"2026-01-28T16:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.275552 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.283001 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 20:58:11.183048866 +0000 UTC Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.290994 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.309077 4877 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232
ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.321717 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.330124 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:26 crc kubenswrapper[4877]: E0128 16:35:26.330255 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.333023 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.343590 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.355823 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.366498 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.367069 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.367123 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.367136 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.367156 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.367169 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:26Z","lastTransitionTime":"2026-01-28T16:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.388369 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"
ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"c
ri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.469812 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.469875 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.469893 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.469915 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.469932 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:26Z","lastTransitionTime":"2026-01-28T16:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.562159 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerStarted","Data":"801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683"}
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.565653 4877 generic.go:334] "Generic (PLEG): container finished" podID="d5c6b6a7-d88e-419a-b28a-a4ae06d24576" containerID="5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b" exitCode=0
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.566117 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" event={"ID":"d5c6b6a7-d88e-419a-b28a-a4ae06d24576","Type":"ContainerDied","Data":"5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b"}
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.571959 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.572012 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.572030 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.572055 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.572075 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:26Z","lastTransitionTime":"2026-01-28T16:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.591729 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.616066 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\
"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\"
:\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.638854 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.658240 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.674468 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.676739 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.676829 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.676844 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.676889 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.676903 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:26Z","lastTransitionTime":"2026-01-28T16:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.695073 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.708037 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea4
0311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.722749 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"imag
e\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.739109 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.751083 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.765454 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.780255 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.780285 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.780294 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.780310 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.780320 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:26Z","lastTransitionTime":"2026-01-28T16:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.783940 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.798700 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.821858 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:26Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.883105 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.883190 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.883211 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.883243 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.883266 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:26Z","lastTransitionTime":"2026-01-28T16:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.986972 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.987523 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.987683 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.987886 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:26 crc kubenswrapper[4877]: I0128 16:35:26.988048 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:26Z","lastTransitionTime":"2026-01-28T16:35:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.046946 4877 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials"
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.090974 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.091094 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.091105 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.091120 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.091131 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:27Z","lastTransitionTime":"2026-01-28T16:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.194158 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.194528 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.194635 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.194736 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.194821 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:27Z","lastTransitionTime":"2026-01-28T16:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.283291 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 20:37:27.928317811 +0000 UTC
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.298223 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.298286 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.298311 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.298336 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.298351 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:27Z","lastTransitionTime":"2026-01-28T16:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.329720 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.329720 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:35:27 crc kubenswrapper[4877]: E0128 16:35:27.329932 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 16:35:27 crc kubenswrapper[4877]: E0128 16:35:27.330012 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.343462 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/st
atic-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.358803 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.370594 4877 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.384416 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.394881 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.400689 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.400735 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.400745 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.400762 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.400772 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:27Z","lastTransitionTime":"2026-01-28T16:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.415316 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"
ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"c
ri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.439025 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\
\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.454643 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.470091 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.484863 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.502880 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.502933 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.502942 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.502961 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.502970 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:27Z","lastTransitionTime":"2026-01-28T16:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.505523 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6
qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.521529 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.537504 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.549714 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.573163 4877 generic.go:334] "Generic (PLEG): container finished" podID="d5c6b6a7-d88e-419a-b28a-a4ae06d24576" containerID="7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff" exitCode=0 Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.573224 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" 
event={"ID":"d5c6b6a7-d88e-419a-b28a-a4ae06d24576","Type":"ContainerDied","Data":"7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff"} Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.588905 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{
\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.601989 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.605913 4877 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.606053 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.606214 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.606357 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.606446 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:27Z","lastTransitionTime":"2026-01-28T16:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.619324 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.633436 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.658454 4877 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"m
ountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/oc
p-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.674432 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operato
r@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.688679 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.700491 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.710383 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.710423 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.710438 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.710457 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.710488 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:27Z","lastTransitionTime":"2026-01-28T16:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.718620 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.754386 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.801129 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.812242 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.812282 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.812294 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.812310 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.812322 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:27Z","lastTransitionTime":"2026-01-28T16:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.820832 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.840291 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.850365 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.915175 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.915232 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.915246 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:27 crc 
kubenswrapper[4877]: I0128 16:35:27.915268 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:27 crc kubenswrapper[4877]: I0128 16:35:27.915282 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:27Z","lastTransitionTime":"2026-01-28T16:35:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.018278 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.018320 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.018333 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.018348 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.018360 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:28Z","lastTransitionTime":"2026-01-28T16:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.121424 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.121501 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.121513 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.121527 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.121540 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:28Z","lastTransitionTime":"2026-01-28T16:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.228498 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.228809 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.228819 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.228834 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.228844 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:28Z","lastTransitionTime":"2026-01-28T16:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.283778 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 17:43:30.212489885 +0000 UTC
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.329414 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 16:35:28 crc kubenswrapper[4877]: E0128 16:35:28.329618 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.334715 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.334771 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.334787 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.334809 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.334825 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:28Z","lastTransitionTime":"2026-01-28T16:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.437283 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.437328 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.437340 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.437358 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.437371 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:28Z","lastTransitionTime":"2026-01-28T16:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.541452 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.541529 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.541545 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.541566 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.541582 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:28Z","lastTransitionTime":"2026-01-28T16:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.584245 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" event={"ID":"d5c6b6a7-d88e-419a-b28a-a4ae06d24576","Type":"ContainerStarted","Data":"7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f"}
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.588910 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerStarted","Data":"ab0f24514370c8c7cb1b66e30dc4c383fb6beec120c2a0b6bf97e91204121db6"}
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.589243 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.597631 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.617875 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.622731 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disable
d\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":
\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.639955 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.643379 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.643419 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.643430 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.643445 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.643466 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:28Z","lastTransitionTime":"2026-01-28T16:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.656707 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.668695 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.680965 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.693401 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea4
0311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.711334 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z 
is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.725295 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernet
es/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.739777 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.746533 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.746610 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.746624 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.746645 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.746658 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:28Z","lastTransitionTime":"2026-01-28T16:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.750668 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.763277 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.775347 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.785651 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.807397 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\
"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-sock
et\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab0f24514370c8c7cb1b66e30dc4c383fb6beec120c2a0b6bf97e91204121db6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mou
ntPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.828778 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.847879 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.848929 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.848958 4877 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.848966 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.848985 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.849000 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:28Z","lastTransitionTime":"2026-01-28T16:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.867316 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.883832 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.900561 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.913459 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.928002 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.947659 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.952526 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.952557 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.952566 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.952581 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.952592 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:28Z","lastTransitionTime":"2026-01-28T16:35:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.970168 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.986632 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:28 crc kubenswrapper[4877]: I0128 16:35:28.998880 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:28Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.015031 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:29Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.029961 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea4
0311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:29Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.059129 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.059202 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.059216 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.059242 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.059263 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:29Z","lastTransitionTime":"2026-01-28T16:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.163737 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.163804 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.163818 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.163841 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.163854 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:29Z","lastTransitionTime":"2026-01-28T16:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.267090 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.267150 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.267160 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.267183 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.267196 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:29Z","lastTransitionTime":"2026-01-28T16:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.284318 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 04:58:05.462966479 +0000 UTC Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.330112 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.330193 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:29 crc kubenswrapper[4877]: E0128 16:35:29.330279 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:35:29 crc kubenswrapper[4877]: E0128 16:35:29.330348 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.371220 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.371292 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.371309 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.371338 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.371355 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:29Z","lastTransitionTime":"2026-01-28T16:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.475819 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.475902 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.475936 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.475963 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.475976 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:29Z","lastTransitionTime":"2026-01-28T16:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.579665 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.579767 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.579785 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.579835 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.579851 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:29Z","lastTransitionTime":"2026-01-28T16:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.592873 4877 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.593381 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.629633 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.645061 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:29Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.662499 4877 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5
cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:29Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.682431 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.682753 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.682840 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.682936 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.683058 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:29Z","lastTransitionTime":"2026-01-28T16:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.686355 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:29Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.706968 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:29Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.723697 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:29Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.741503 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:29Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.757340 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:29Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.781892 4877 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-d
ev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastSta
te\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab0f24514370c8c7cb1b66e30dc4c383fb6beec120c2a0b6bf97e91204121db6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/ser
viceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:29Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.786153 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.786355 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.786447 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:29 crc 
kubenswrapper[4877]: I0128 16:35:29.786589 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.786672 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:29Z","lastTransitionTime":"2026-01-28T16:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.796895 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiser
ver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:29Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.817825 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:29Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.839557 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:29Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.854865 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:29Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.865891 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:29Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.889914 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.890577 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.890592 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:29 crc 
kubenswrapper[4877]: I0128 16:35:29.890612 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.890627 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:29Z","lastTransitionTime":"2026-01-28T16:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.891562 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:29Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.993665 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.993742 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.993753 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.993772 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:29 crc kubenswrapper[4877]: I0128 16:35:29.993785 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:29Z","lastTransitionTime":"2026-01-28T16:35:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.097297 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.097354 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.097367 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.097386 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.097398 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:30Z","lastTransitionTime":"2026-01-28T16:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.200348 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.200429 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.200457 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.200516 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.200536 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:30Z","lastTransitionTime":"2026-01-28T16:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.285209 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 14:45:34.762517697 +0000 UTC
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.303834 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.303879 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.303892 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.303913 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.303927 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:30Z","lastTransitionTime":"2026-01-28T16:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.330318 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 16:35:30 crc kubenswrapper[4877]: E0128 16:35:30.330546 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.409108 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.409185 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.409210 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.409243 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.409265 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:30Z","lastTransitionTime":"2026-01-28T16:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.523912 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.523969 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.523980 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.523999 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.524011 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:30Z","lastTransitionTime":"2026-01-28T16:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.596316 4877 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.627396 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.627442 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.627455 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.627489 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.627504 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:30Z","lastTransitionTime":"2026-01-28T16:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.731058 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.731104 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.731116 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.731136 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.731152 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:30Z","lastTransitionTime":"2026-01-28T16:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.833359 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.833397 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.833406 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.833424 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.833434 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:30Z","lastTransitionTime":"2026-01-28T16:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.936104 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.936160 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.936171 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.936190 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:30 crc kubenswrapper[4877]: I0128 16:35:30.936203 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:30Z","lastTransitionTime":"2026-01-28T16:35:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.038933 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.038989 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.038999 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.039015 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.039027 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:31Z","lastTransitionTime":"2026-01-28T16:35:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.142537 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.142602 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.142615 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.142635 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.142655 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:31Z","lastTransitionTime":"2026-01-28T16:35:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.246051 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.246120 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.246139 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.246166 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.246185 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:31Z","lastTransitionTime":"2026-01-28T16:35:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.286316 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 18:18:37.365265222 +0000 UTC Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.330303 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.330353 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:31 crc kubenswrapper[4877]: E0128 16:35:31.330592 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:35:31 crc kubenswrapper[4877]: E0128 16:35:31.330763 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.348368 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.348432 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.348452 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.348507 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.348536 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:31Z","lastTransitionTime":"2026-01-28T16:35:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.451264 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.451322 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.451336 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.451358 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.451374 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:31Z","lastTransitionTime":"2026-01-28T16:35:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.554751 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.554807 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.554818 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.554839 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.554852 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:31Z","lastTransitionTime":"2026-01-28T16:35:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.605171 4877 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.680592 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.680632 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.680640 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.680654 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.680663 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:31Z","lastTransitionTime":"2026-01-28T16:35:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.784189 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.784242 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.784268 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.784302 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.784321 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:31Z","lastTransitionTime":"2026-01-28T16:35:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.887845 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.887906 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.887920 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.887943 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.887962 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:31Z","lastTransitionTime":"2026-01-28T16:35:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.991354 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.991418 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.991430 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.991450 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:31 crc kubenswrapper[4877]: I0128 16:35:31.991462 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:31Z","lastTransitionTime":"2026-01-28T16:35:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.094466 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.094542 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.094555 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.094575 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.094588 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:32Z","lastTransitionTime":"2026-01-28T16:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.198280 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.198362 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.198387 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.198426 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.198451 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:32Z","lastTransitionTime":"2026-01-28T16:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.286538 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 02:32:38.684396987 +0000 UTC Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.301772 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.301871 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.301902 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.301932 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.301953 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:32Z","lastTransitionTime":"2026-01-28T16:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.330326 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:32 crc kubenswrapper[4877]: E0128 16:35:32.330524 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.404838 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.404907 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.404924 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.404947 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.404963 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:32Z","lastTransitionTime":"2026-01-28T16:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.508058 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.508111 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.508121 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.508143 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.508156 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:32Z","lastTransitionTime":"2026-01-28T16:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.610932 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.611006 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.611021 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.611041 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.611054 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:32Z","lastTransitionTime":"2026-01-28T16:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.611462 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovnkube-controller/0.log"
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.616030 4877 generic.go:334] "Generic (PLEG): container finished" podID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerID="ab0f24514370c8c7cb1b66e30dc4c383fb6beec120c2a0b6bf97e91204121db6" exitCode=1
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.616122 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerDied","Data":"ab0f24514370c8c7cb1b66e30dc4c383fb6beec120c2a0b6bf97e91204121db6"}
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.617096 4877 scope.go:117] "RemoveContainer" containerID="ab0f24514370c8c7cb1b66e30dc4c383fb6beec120c2a0b6bf97e91204121db6"
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.635972 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:32Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.654450 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:32Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.676356 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:32Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.705011 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:32Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.713047 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.713176 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.713195 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.713218 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.713232 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:32Z","lastTransitionTime":"2026-01-28T16:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.723177 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:32Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.737282 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:32Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.752253 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:32Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.766273 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:32Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.790702 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab0f24514370c8c7cb1b66e30dc4c383fb6beec120c2a0b6bf97e91204121db6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab0f24514370c8c7cb1b66e30dc4c383fb6beec120c2a0b6bf97e91204121db6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:31Z\\\",\\\"message\\\":\\\"128 16:35:31.636374 6228 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 16:35:31.636403 6228 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 16:35:31.636460 6228 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 16:35:31.636519 6228 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 16:35:31.636559 6228 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 16:35:31.636602 6228 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 16:35:31.636615 6228 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 16:35:31.636632 6228 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 16:35:31.636643 6228 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0128 16:35:31.636649 6228 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 16:35:31.636655 6228 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0128 16:35:31.636661 6228 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 16:35:31.636672 6228 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 16:35:31.636683 6228 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 16:35:31.636686 6228 factory.go:656] Stopping watch factory\\\\nI0128 16:35:31.636714 6228 ovnkube.go:599] Stopped 
ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:32Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.816586 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.816631 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.816641 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.816658 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.816667 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:32Z","lastTransitionTime":"2026-01-28T16:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.817049 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:32Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.834104 4877 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:32Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.849638 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:32Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.866299 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:32Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.880493 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:32Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.919954 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.920004 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.920015 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.920045 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:32 crc kubenswrapper[4877]: I0128 16:35:32.920057 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:32Z","lastTransitionTime":"2026-01-28T16:35:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.022884 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.022942 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.022956 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.022979 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.022995 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:33Z","lastTransitionTime":"2026-01-28T16:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.049652 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.049899 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:35:49.049863915 +0000 UTC m=+52.608190803 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.049982 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.050023 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.050132 4877 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.050225 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:49.050203424 +0000 UTC m=+52.608530482 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.050276 4877 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.050414 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:49.050384618 +0000 UTC m=+52.608711666 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.127266 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.127347 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.127389 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.127417 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.127434 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:33Z","lastTransitionTime":"2026-01-28T16:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.151199 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.151262 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.151401 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.151419 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.151431 4877 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.151438 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 
16:35:33.151465 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.151513 4877 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.151497 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:49.151467621 +0000 UTC m=+52.709794509 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.151586 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 16:35:49.151554344 +0000 UTC m=+52.709881252 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.230638 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.230706 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.230722 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.230749 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.230779 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:33Z","lastTransitionTime":"2026-01-28T16:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.287326 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 17:12:20.429651233 +0000 UTC Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.330103 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.330116 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.330299 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.330352 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.333463 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.333535 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.333548 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.333589 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.333602 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:33Z","lastTransitionTime":"2026-01-28T16:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.432907 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.433022 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.433051 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.433090 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.433129 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:33Z","lastTransitionTime":"2026-01-28T16:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.449607 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.455189 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.455267 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.455288 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.455318 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.455375 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:33Z","lastTransitionTime":"2026-01-28T16:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.470921 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.476060 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.476117 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.476136 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.476164 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.476182 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:33Z","lastTransitionTime":"2026-01-28T16:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.494315 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.499128 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.499188 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.499206 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.499232 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.499251 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:33Z","lastTransitionTime":"2026-01-28T16:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.518222 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.524273 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.524327 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.524338 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.524359 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.524371 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:33Z","lastTransitionTime":"2026-01-28T16:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.542293 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: E0128 16:35:33.542455 4877 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.544650 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.544724 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.544743 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.544772 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.544794 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:33Z","lastTransitionTime":"2026-01-28T16:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.636761 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovnkube-controller/0.log" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.641414 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerStarted","Data":"4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4"} Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.641597 4877 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.646491 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.646543 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.646555 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.646583 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.646595 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:33Z","lastTransitionTime":"2026-01-28T16:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.660260 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.687345 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab0f24514370c8c7cb1b66e30dc4c383fb6beec120c2a0b6bf97e91204121db6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:31Z\\\",\\\"message\\\":\\\"128 16:35:31.636374 6228 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 16:35:31.636403 6228 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 16:35:31.636460 6228 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 16:35:31.636519 6228 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 16:35:31.636559 6228 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 16:35:31.636602 6228 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 16:35:31.636615 6228 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 16:35:31.636632 6228 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 16:35:31.636643 6228 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0128 16:35:31.636649 6228 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 16:35:31.636655 6228 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0128 16:35:31.636661 6228 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 16:35:31.636672 6228 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 16:35:31.636683 6228 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 16:35:31.636686 6228 factory.go:656] Stopping watch factory\\\\nI0128 16:35:31.636714 6228 ovnkube.go:599] Stopped 
ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.708776 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.728907 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.750350 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.752445 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.752561 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.752580 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.752607 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.752628 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:33Z","lastTransitionTime":"2026-01-28T16:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.793012 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.812598 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.832744 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.858772 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.858840 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.858854 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.858878 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.858895 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:33Z","lastTransitionTime":"2026-01-28T16:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.875203 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.893306 4877 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.906292 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.919218 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.933893 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.952587 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea4
0311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:33Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.962838 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.962938 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.962966 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.963006 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:33 crc kubenswrapper[4877]: I0128 16:35:33.963038 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:33Z","lastTransitionTime":"2026-01-28T16:35:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.066913 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.066979 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.066997 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.067020 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.067037 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:34Z","lastTransitionTime":"2026-01-28T16:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.170585 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.170632 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.170642 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.170664 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.170679 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:34Z","lastTransitionTime":"2026-01-28T16:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.280848 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.280922 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.280937 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.280960 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.280975 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:34Z","lastTransitionTime":"2026-01-28T16:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
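The NodeNotReady churn above repeats because the kubelet's network-readiness check looks for a CNI configuration file in /etc/kubernetes/cni/net.d/ and finds none until the network provider (here, OVN-Kubernetes via multus) writes one. A rough sketch of that readiness probe — the directory comes straight from the log message, while the extension list is an assumption matching what common CNI config loaders accept:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Directory taken from the log message; the node stays NotReady
	// until a network plugin drops a config file here.
	dir := "/etc/kubernetes/cni/net.d"
	var found []string
	// Extension list is an assumption, mirroring typical CNI config loaders.
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		m, _ := filepath.Glob(filepath.Join(dir, pat))
		found = append(found, m...)
	}
	if len(found) == 0 {
		fmt.Printf("no CNI configuration file in %s. Has your network provider started?\n", dir)
		return
	}
	fmt.Println("CNI configs present:", found)
}

On this node the condition should clear once the ovnkube-node pod's ovnkube-controller container (restarting above) stays up long enough to write its config.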
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.287511 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 16:43:03.947617167 +0000 UTC
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.330514 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 16:35:34 crc kubenswrapper[4877]: E0128 16:35:34.330782 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.339346 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st"]
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.340784 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.344106 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.345610 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.365703 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6f86e157-eaeb-461b-b2e7-03c6a119c22e-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2p4st\" (UID: \"6f86e157-eaeb-461b-b2e7-03c6a119c22e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.365762 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6f86e157-eaeb-461b-b2e7-03c6a119c22e-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2p4st\" (UID: \"6f86e157-eaeb-461b-b2e7-03c6a119c22e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st"
Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.365829 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rz7hn\" (UniqueName: \"kubernetes.io/projected/6f86e157-eaeb-461b-b2e7-03c6a119c22e-kube-api-access-rz7hn\") pod \"ovnkube-control-plane-749d76644c-2p4st\" (UID: \"6f86e157-eaeb-461b-b2e7-03c6a119c22e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st"
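The certificate_manager.go entry above is also telling: the kubelet-serving certificate expires 2026-02-24, but its jittered rotation deadline (2025-12-21) already lies before the log's current time of 2026-01-28, so rotation is overdue and should be attempted immediately. client-go's certificate manager picks that deadline at a uniformly random point between 70% and 90% of the certificate's validity window; a small sketch that approximates the calculation (the NotBefore date is hypothetical, since the log only shows the expiry):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline approximates client-go's certificate manager: pick a
// uniformly random point between 70% and 90% of the cert's validity window.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	// Expiry copied from the log; NotBefore assumes a hypothetical one-year lifetime.
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)
	notBefore := notAfter.AddDate(-1, 0, 0)
	fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
}

Under that one-year assumption the deadline lands somewhere between roughly November 2025 and mid-January 2026, consistent with the 2025-12-21 value logged.

Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.365866 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6f86e157-eaeb-461b-b2e7-03c6a119c22e-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2p4st\" (UID: \"6f86e157-eaeb-461b-b2e7-03c6a119c22e\") " 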
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.366133 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.384649 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.385430 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.385465 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.385541 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.385563 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:34Z","lastTransitionTime":"2026-01-28T16:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.385287 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.403627 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.418532 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.445887 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea4
0311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.466590 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rz7hn\" (UniqueName: \"kubernetes.io/projected/6f86e157-eaeb-461b-b2e7-03c6a119c22e-kube-api-access-rz7hn\") pod \"ovnkube-control-plane-749d76644c-2p4st\" (UID: \"6f86e157-eaeb-461b-b2e7-03c6a119c22e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.466663 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6f86e157-eaeb-461b-b2e7-03c6a119c22e-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2p4st\" (UID: \"6f86e157-eaeb-461b-b2e7-03c6a119c22e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.466650 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab0f24514370c8c7cb1b66e30dc4c383fb6beec120c2a0b6bf97e91204121db6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:31Z\\\",\\\"message\\\":\\\"128 16:35:31.636374 6228 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 16:35:31.636403 6228 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 16:35:31.636460 6228 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 16:35:31.636519 6228 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 16:35:31.636559 6228 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 16:35:31.636602 6228 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 16:35:31.636615 6228 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 16:35:31.636632 6228 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 16:35:31.636643 6228 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0128 16:35:31.636649 6228 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 16:35:31.636655 6228 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0128 16:35:31.636661 6228 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 16:35:31.636672 6228 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 16:35:31.636683 6228 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 16:35:31.636686 6228 factory.go:656] Stopping watch factory\\\\nI0128 16:35:31.636714 6228 ovnkube.go:599] Stopped 
ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.467678 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6f86e157-eaeb-461b-b2e7-03c6a119c22e-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2p4st\" (UID: \"6f86e157-eaeb-461b-b2e7-03c6a119c22e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.467792 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6f86e157-eaeb-461b-b2e7-03c6a119c22e-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2p4st\" (UID: \"6f86e157-eaeb-461b-b2e7-03c6a119c22e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.467810 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6f86e157-eaeb-461b-b2e7-03c6a119c22e-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2p4st\" (UID: \"6f86e157-eaeb-461b-b2e7-03c6a119c22e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.468242 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6f86e157-eaeb-461b-b2e7-03c6a119c22e-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2p4st\" (UID: \"6f86e157-eaeb-461b-b2e7-03c6a119c22e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.474260 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/6f86e157-eaeb-461b-b2e7-03c6a119c22e-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2p4st\" (UID: \"6f86e157-eaeb-461b-b2e7-03c6a119c22e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.490076 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.490138 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.490158 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.490187 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.490212 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:34Z","lastTransitionTime":"2026-01-28T16:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.490227 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/op
enshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.500013 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rz7hn\" (UniqueName: \"kubernetes.io/projected/6f86e157-eaeb-461b-b2e7-03c6a119c22e-kube-api-access-rz7hn\") pod \"ovnkube-control-plane-749d76644c-2p4st\" (UID: \"6f86e157-eaeb-461b-b2e7-03c6a119c22e\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.507077 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.521431 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.534466 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.561472 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.577728 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.590615 4877 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.593172 4877 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.593235 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.593255 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.593281 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.593298 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:34Z","lastTransitionTime":"2026-01-28T16:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.606674 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.626936 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.647631 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovnkube-controller/1.log" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.648212 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovnkube-controller/0.log" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.652836 4877 generic.go:334] "Generic (PLEG): container finished" podID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerID="4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4" exitCode=1 Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.652890 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerDied","Data":"4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4"} Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.652959 4877 scope.go:117] "RemoveContainer" containerID="ab0f24514370c8c7cb1b66e30dc4c383fb6beec120c2a0b6bf97e91204121db6" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.653815 4877 scope.go:117] "RemoveContainer" containerID="4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4" Jan 28 16:35:34 crc kubenswrapper[4877]: E0128 16:35:34.653993 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.662657 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.678152 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.702966 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.703021 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.703034 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.703057 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.703071 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:34Z","lastTransitionTime":"2026-01-28T16:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.710366 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\
\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volum
eMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: 
I0128 16:35:34.737302 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\"
:\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.753514 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.767365 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.781316 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.793786 4877 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5
cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.806227 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.806273 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.806283 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.806300 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.806287 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.806312 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:34Z","lastTransitionTime":"2026-01-28T16:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.819044 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.834130 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.849571 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.861167 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.880981 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab0f24514370c8c7cb1b66e30dc4c383fb6beec120c2a0b6bf97e91204121db6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:31Z\\\",\\\"message\\\":\\\"128 16:35:31.636374 6228 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 16:35:31.636403 6228 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 16:35:31.636460 6228 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 16:35:31.636519 6228 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 16:35:31.636559 6228 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 16:35:31.636602 6228 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 16:35:31.636615 6228 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 16:35:31.636632 6228 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 16:35:31.636643 6228 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0128 16:35:31.636649 6228 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 16:35:31.636655 6228 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0128 16:35:31.636661 6228 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 16:35:31.636672 6228 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 16:35:31.636683 6228 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 16:35:31.636686 6228 factory.go:656] Stopping watch factory\\\\nI0128 16:35:31.636714 6228 ovnkube.go:599] Stopped ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"message\\\":\\\"e, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8441, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8442, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8444, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0128 16:35:33.501619 6376 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501636 6376 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501630 6376 
model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 16:35:33.501651 6376 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-5gw27 in node \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\"
:\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.897463 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.908944 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.908999 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.909014 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.909043 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.909058 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:34Z","lastTransitionTime":"2026-01-28T16:35:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:34 crc kubenswrapper[4877]: I0128 16:35:34.909728 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2026-01-28T16:35:34Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.012220 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.012268 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.012278 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.012295 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.012306 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:35Z","lastTransitionTime":"2026-01-28T16:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.115700 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.115832 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.115877 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.115904 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.115921 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:35Z","lastTransitionTime":"2026-01-28T16:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.218846 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.218936 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.218954 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.218980 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.219000 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:35Z","lastTransitionTime":"2026-01-28T16:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.288552 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 02:00:26.222128529 +0000 UTC
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.322350 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.322405 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.322416 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.322437 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.322453 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:35Z","lastTransitionTime":"2026-01-28T16:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.329716 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.329805 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:35:35 crc kubenswrapper[4877]: E0128 16:35:35.329874 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 16:35:35 crc kubenswrapper[4877]: E0128 16:35:35.329943 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.426506 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.426605 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.426623 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.426652 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.426670 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:35Z","lastTransitionTime":"2026-01-28T16:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.529519 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.529572 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.529581 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.529598 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.529610 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:35Z","lastTransitionTime":"2026-01-28T16:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.633648 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.634040 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.634146 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.634273 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.634375 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:35Z","lastTransitionTime":"2026-01-28T16:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.659149 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" event={"ID":"6f86e157-eaeb-461b-b2e7-03c6a119c22e","Type":"ContainerStarted","Data":"94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858"}
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.659254 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" event={"ID":"6f86e157-eaeb-461b-b2e7-03c6a119c22e","Type":"ContainerStarted","Data":"ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887"}
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.659278 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" event={"ID":"6f86e157-eaeb-461b-b2e7-03c6a119c22e","Type":"ContainerStarted","Data":"0da425a2c7b4aa671a0a77e803177c320d8b0489d5f0768c372391db0f33f16e"}
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.662328 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovnkube-controller/1.log"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.684135 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.702421 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.715892 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.734578 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.737348 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.737441 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.737489 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.737512 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.737524 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:35Z","lastTransitionTime":"2026-01-28T16:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.752208 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.773387 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab0f24514370c8c7cb1b66e30dc4c383fb6beec120c2a0b6bf97e91204121db6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:31Z\\\",\\\"message\\\":\\\"128 16:35:31.636374 6228 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 16:35:31.636403 6228 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 16:35:31.636460 6228 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 16:35:31.636519 6228 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 16:35:31.636559 6228 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 16:35:31.636602 6228 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 16:35:31.636615 6228 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 16:35:31.636632 6228 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 16:35:31.636643 6228 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0128 16:35:31.636649 6228 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 16:35:31.636655 6228 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0128 16:35:31.636661 6228 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 16:35:31.636672 6228 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 16:35:31.636683 6228 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 16:35:31.636686 6228 factory.go:656] Stopping watch factory\\\\nI0128 16:35:31.636714 6228 ovnkube.go:599] Stopped 
ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"message\\\":\\\"e, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8441, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8442, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8444, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0128 16:35:33.501619 6376 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501636 6376 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501630 6376 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 16:35:33.501651 6376 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-5gw27 in node 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.791240 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster
-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.804738 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.820201 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.836424 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.841342 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.841412 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.841425 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.841446 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.841464 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:35Z","lastTransitionTime":"2026-01-28T16:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.848142 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-bh9bk"]
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.849140 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:35:35 crc kubenswrapper[4877]: E0128 16:35:35.849289 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.851131 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.860866 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z" Jan 28 
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.885099 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.886746 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q848q\" (UniqueName: \"kubernetes.io/projected/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-kube-api-access-q848q\") pod \"network-metrics-daemon-bh9bk\" (UID: \"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\") " pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.886817 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs\") pod \"network-metrics-daemon-bh9bk\" (UID: \"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\") " pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.900076 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.911813 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.923596 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.937872 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.944451 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.944517 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.944527 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:35 crc 
kubenswrapper[4877]: I0128 16:35:35.944551 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.944562 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:35Z","lastTransitionTime":"2026-01-28T16:35:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.961338 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.976642 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.988267 4877 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-q848q\" (UniqueName: \"kubernetes.io/projected/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-kube-api-access-q848q\") pod \"network-metrics-daemon-bh9bk\" (UID: \"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\") " pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.988322 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs\") pod \"network-metrics-daemon-bh9bk\" (UID: \"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\") " pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:35:35 crc kubenswrapper[4877]: E0128 16:35:35.988557 4877 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 16:35:35 crc kubenswrapper[4877]: E0128 16:35:35.988637 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs podName:a6ea3417-5f04-4035-aaea-0dc5ad7d002d nodeName:}" failed. No retries permitted until 2026-01-28 16:35:36.488616605 +0000 UTC m=+40.046943493 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs") pod "network-metrics-daemon-bh9bk" (UID: "a6ea3417-5f04-4035-aaea-0dc5ad7d002d") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 16:35:35 crc kubenswrapper[4877]: I0128 16:35:35.990939 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.012505 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab0f24514370c8c7cb1b66e30dc4c383fb6beec120c2a0b6bf97e91204121db6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:31Z\\\",\\\"message\\\":\\\"128 16:35:31.636374 6228 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 16:35:31.636403 6228 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 16:35:31.636460 6228 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 16:35:31.636519 6228 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 16:35:31.636559 6228 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 16:35:31.636602 6228 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 16:35:31.636615 6228 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 16:35:31.636632 6228 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 16:35:31.636643 6228 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0128 16:35:31.636649 6228 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 16:35:31.636655 6228 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0128 16:35:31.636661 6228 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 16:35:31.636672 6228 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 16:35:31.636683 6228 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 16:35:31.636686 6228 factory.go:656] Stopping watch factory\\\\nI0128 16:35:31.636714 6228 ovnkube.go:599] Stopped ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"message\\\":\\\"e, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8441, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8442, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8444, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0128 16:35:33.501619 6376 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501636 6376 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501630 6376 
model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 16:35:33.501651 6376 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-5gw27 in node \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\"
:\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:36Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.021865 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q848q\" (UniqueName: \"kubernetes.io/projected/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-kube-api-access-q848q\") pod \"network-metrics-daemon-bh9bk\" (UID: \"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\") " pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.038921 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:36Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.048744 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.048799 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.048810 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.048828 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.048843 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:36Z","lastTransitionTime":"2026-01-28T16:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.057176 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:36Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.070548 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:36Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.083169 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:36Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.096202 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:36Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.108734 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:36Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.128844 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:36Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.149569 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:36Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.152788 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.152847 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.152862 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.152887 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.152905 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:36Z","lastTransitionTime":"2026-01-28T16:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.166177 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:36Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.255780 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.255822 4877 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.255832 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.255846 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.255856 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:36Z","lastTransitionTime":"2026-01-28T16:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.289268 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 20:54:53.882345855 +0000 UTC
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.330309 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 16:35:36 crc kubenswrapper[4877]: E0128 16:35:36.330561 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.359344 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.359394 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.359406 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.359433 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.359449 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:36Z","lastTransitionTime":"2026-01-28T16:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
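Every status patch in this log fails the same way: the serving certificate behind the pod.network-node-identity.openshift.io webhook expired on 2025-08-24T17:21:41Z, while the node clock reads 2026-01-28. The error text comes from Go's standard x509 validity-window check; a minimal sketch of that check follows (the certificate path is hypothetical, not taken from the log):

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "os"
        "time"
    )

    func main() {
        // Hypothetical path; the real webhook serving cert lives elsewhere on the node.
        data, err := os.ReadFile("/tmp/webhook-serving.crt")
        if err != nil {
            fmt.Println("read:", err)
            return
        }
        block, _ := pem.Decode(data)
        if block == nil {
            fmt.Println("no PEM block found")
            return
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            fmt.Println("parse:", err)
            return
        }
        now := time.Now()
        // crypto/x509 rejects any chain whose leaf is outside [NotBefore, NotAfter];
        // that is the "certificate has expired or is not yet valid" error above.
        if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
            fmt.Printf("expired or not yet valid: current time %s is after %s\n",
                now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
            return
        }
        fmt.Println("certificate valid until", cert.NotAfter.UTC().Format(time.RFC3339))
    }

Until that certificate is rotated or regenerated, every webhook-gated status write from the kubelet will keep failing with this message.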
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.463931 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.464029 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.464059 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.464099 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.464121 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:36Z","lastTransitionTime":"2026-01-28T16:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.494104 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs\") pod \"network-metrics-daemon-bh9bk\" (UID: \"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\") " pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:35:36 crc kubenswrapper[4877]: E0128 16:35:36.494411 4877 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 16:35:36 crc kubenswrapper[4877]: E0128 16:35:36.494627 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs podName:a6ea3417-5f04-4035-aaea-0dc5ad7d002d nodeName:}" failed. No retries permitted until 2026-01-28 16:35:37.494589484 +0000 UTC m=+41.052916412 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs") pod "network-metrics-daemon-bh9bk" (UID: "a6ea3417-5f04-4035-aaea-0dc5ad7d002d") : object "openshift-multus"/"metrics-daemon-secret" not registered
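The three entries above show the volume-mount retry pattern: the mount fails because the secret is not (yet) registered, and nestedpendingoperations.go schedules the next attempt with an exponentially growing delay, starting at the logged durationBeforeRetry of 1s. An illustrative sketch of that doubling schedule; the cap is an assumption for the example, not kubelet's actual constant:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const maxDelay = 2 * time.Minute // assumed cap, not taken from the log
        delay := time.Second             // matches "durationBeforeRetry 1s" above
        for attempt := 1; attempt <= 5; attempt++ {
            retryAt := time.Now().Add(delay)
            fmt.Printf("attempt %d failed; no retries permitted until %s (durationBeforeRetry %s)\n",
                attempt, retryAt.UTC().Format(time.RFC3339), delay)
            delay *= 2
            if delay > maxDelay {
                delay = maxDelay
            }
        }
    }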
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.568542 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.568652 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.568673 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.568696 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.568736 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:36Z","lastTransitionTime":"2026-01-28T16:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.674148 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.674251 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.674272 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.674305 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.674329 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:36Z","lastTransitionTime":"2026-01-28T16:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
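The recurring setters.go:603 entries are the kubelet writing the node's Ready condition; the same condition can be read back with client-go. A hedged sketch, assuming a reachable API server and a hypothetical kubeconfig path:

    package main

    import (
        "context"
        "fmt"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Hypothetical kubeconfig location for the example.
        config, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig")
        if err != nil {
            panic(err)
        }
        clientset, err := kubernetes.NewForConfig(config)
        if err != nil {
            panic(err)
        }
        node, err := clientset.CoreV1().Nodes().Get(context.TODO(), "crc", metav1.GetOptions{})
        if err != nil {
            panic(err)
        }
        // Print the same condition the setters.go entries keep rewriting.
        for _, cond := range node.Status.Conditions {
            if cond.Type == corev1.NodeReady {
                fmt.Printf("Ready=%s reason=%s message=%q\n", cond.Status, cond.Reason, cond.Message)
            }
        }
    }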
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.779113 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.779196 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.779208 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.779227 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.779243 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:36Z","lastTransitionTime":"2026-01-28T16:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.883033 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.883099 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.883117 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.883144 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.883165 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:36Z","lastTransitionTime":"2026-01-28T16:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.986922 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.986994 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.987019 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.987052 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:36 crc kubenswrapper[4877]: I0128 16:35:36.987076 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:36Z","lastTransitionTime":"2026-01-28T16:35:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
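The NotReady reason is constant throughout: no CNI configuration file in /etc/kubernetes/cni/net.d/. The readiness test amounts to scanning that directory for a usable network config; a rough sketch follows (the accepted extensions mirror libcni conventions and are an assumption here, not taken from this log):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        dir := "/etc/kubernetes/cni/net.d"
        entries, err := os.ReadDir(dir)
        if err != nil {
            fmt.Println("cannot read CNI conf dir:", err)
            return
        }
        var confs []string
        for _, e := range entries {
            // Assumed set of config extensions recognized by the runtime.
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json":
                confs = append(confs, e.Name())
            }
        }
        if len(confs) == 0 {
            fmt.Println("no CNI configuration file in", dir, "- network plugin not ready")
            return
        }
        fmt.Println("CNI configs found:", confs)
    }

On this node the directory stays empty because ovnkube-controller keeps crash-looping (see the ovnkube-node-5gw27 status below), so the condition never clears.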
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.090871 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.091279 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.091313 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.091345 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.091369 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:37Z","lastTransitionTime":"2026-01-28T16:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.194588 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.194666 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.194691 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.194711 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.194722 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:37Z","lastTransitionTime":"2026-01-28T16:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
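The certificate_manager.go:356 entries (one precedes, another follows just below) report a rotation deadline that is already in the past and that changes between syncs, which is consistent with a deadline re-jittered on each pass. A sketch of that computation; the 70-90% window is an assumption based on client-go's certificate manager, and the one-year lifetime is invented for the example:

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // rotationDeadline picks a uniformly random point in roughly the last
    // 30%..10% of the certificate's lifetime (assumed fraction, see above).
    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
        total := notAfter.Sub(notBefore)
        jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
        return notBefore.Add(jittered)
    }

    func main() {
        notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z") // expiry from the log
        notBefore := notAfter.Add(-365 * 24 * time.Hour)                // assumed 1y lifetime
        fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter).UTC())
    }

A deadline in the past simply means rotation is due now; the kubelet keeps recomputing and retrying it on every sync loop.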
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.289543 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 09:55:01.632382536 +0000 UTC
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.298359 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.298406 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.298417 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.298437 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.298451 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:37Z","lastTransitionTime":"2026-01-28T16:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.330337 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:35:37 crc kubenswrapper[4877]: E0128 16:35:37.330542 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d"
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.330871 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.330967 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:35:37 crc kubenswrapper[4877]: E0128 16:35:37.331130 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 16:35:37 crc kubenswrapper[4877]: E0128 16:35:37.331286 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.357010 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/st
atic-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.373340 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.389428 4877 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.401585 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.401659 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.401685 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.401723 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.401749 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:37Z","lastTransitionTime":"2026-01-28T16:35:37Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.422980 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ab0f24514370c8c7cb1b66e30dc4c383fb6beec120c2a0b6bf97e91204121db6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:31Z\\\",\\\"message\\\":\\\"128 16:35:31.636374 6228 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 16:35:31.636403 6228 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 16:35:31.636460 6228 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 16:35:31.636519 6228 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 16:35:31.636559 6228 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 16:35:31.636602 6228 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 16:35:31.636615 6228 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 16:35:31.636632 6228 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 16:35:31.636643 6228 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0128 16:35:31.636649 6228 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 16:35:31.636655 6228 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0128 16:35:31.636661 6228 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 16:35:31.636672 6228 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 16:35:31.636683 6228 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 16:35:31.636686 6228 factory.go:656] Stopping watch factory\\\\nI0128 16:35:31.636714 6228 ovnkube.go:599] Stopped 
ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"message\\\":\\\"e, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8441, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8442, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8444, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0128 16:35:33.501619 6376 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501636 6376 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501630 6376 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 16:35:33.501651 6376 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-5gw27 in node 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.440548 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster
-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.466265 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.482089 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.502261 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.503313 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs\") pod \"network-metrics-daemon-bh9bk\" (UID: \"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\") " pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:35:37 crc kubenswrapper[4877]: E0128 16:35:37.503595 4877 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 16:35:37 crc kubenswrapper[4877]: E0128 16:35:37.503682 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs podName:a6ea3417-5f04-4035-aaea-0dc5ad7d002d nodeName:}" failed. No retries permitted until 2026-01-28 16:35:39.50365677 +0000 UTC m=+43.061983668 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs") pod "network-metrics-daemon-bh9bk" (UID: "a6ea3417-5f04-4035-aaea-0dc5ad7d002d") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.507115 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.507172 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.507192 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.507219 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.507235 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:37Z","lastTransitionTime":"2026-01-28T16:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
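Every rejected status patch in this stretch fails for the same reason: the pod.network-node-identity.openshift.io admission webhook served at https://127.0.0.1:9743 presents a certificate that expired on 2025-08-24T17:21:41Z, while the node clock reads 2026-01-28T16:35:37Z. A minimal diagnostic sketch to confirm the validity window from the node itself (assumptions: Python 3 with the third-party cryptography package installed, and a webhook that completes a TLS handshake without demanding a client certificate; the address is copied from the log lines above):

    import ssl
    from datetime import datetime, timezone

    from cryptography import x509

    # Fetch the serving certificate without chain validation (an expired cert
    # would otherwise abort the handshake on the client side).
    pem = ssl.get_server_certificate(("127.0.0.1", 9743))
    cert = x509.load_pem_x509_certificate(pem.encode())

    now = datetime.now(timezone.utc)
    not_before = cert.not_valid_before.replace(tzinfo=timezone.utc)
    not_after = cert.not_valid_after.replace(tzinfo=timezone.utc)
    print(f"notBefore={not_before}  notAfter={not_after}  now={now}")
    print("EXPIRED" if now > not_after else "within validity window")

Against the values in the log this would report a notAfter of 2025-08-24T17:21:41Z and print EXPIRED, matching the x509 error the kubelet keeps hitting.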
Has your network provider started?"} Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.519138 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.530633 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:37Z is after 2025-08-24T17:21:41Z" Jan 28 
16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.545025 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.562743 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.576215 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.596287 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.609683 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.610490 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.610521 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.610531 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.610552 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.610563 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:37Z","lastTransitionTime":"2026-01-28T16:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.621413 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.713193 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.713235 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.713249 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.713266 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.713281 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:37Z","lastTransitionTime":"2026-01-28T16:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.816053 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.816099 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.816112 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.816133 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.816145 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:37Z","lastTransitionTime":"2026-01-28T16:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
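The patch bodies the status manager is trying to send are Kubernetes strategic merge patches: the $setElementOrder/conditions directive pins the ordering of the conditions list, while the conditions array itself carries only the fields that changed. A minimal reconstruction of that shape (uid and timestamp copied from the network-operator entry above; illustrative only):

    import json

    patch = {
        "metadata": {"uid": "37a5e44f-9a88-4405-be8a-b645485e7312"},
        "status": {
            # Strategic-merge-patch directive: fix the order of the list
            # elements, keyed by their "type" field.
            "$setElementOrder/conditions": [
                {"type": "PodReadyToStartContainers"},
                {"type": "Initialized"},
                {"type": "Ready"},
                {"type": "ContainersReady"},
                {"type": "PodScheduled"},
            ],
            # Only the entries that actually changed are spelled out.
            "conditions": [
                {"lastTransitionTime": "2026-01-28T16:35:18Z",
                 "status": "True", "type": "Ready"},
            ],
        },
    }
    print(json.dumps(patch, indent=2))

None of these patches land, because the apiserver consults the node-identity webhook before admitting the update, and that call is what fails.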
Has your network provider started?"} Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.921117 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.921201 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.921228 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.921264 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:37 crc kubenswrapper[4877]: I0128 16:35:37.921285 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:37Z","lastTransitionTime":"2026-01-28T16:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.025269 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.025336 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.025357 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.025384 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.025402 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:38Z","lastTransitionTime":"2026-01-28T16:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.128238 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.128285 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.128296 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.128315 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.128330 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:38Z","lastTransitionTime":"2026-01-28T16:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.231575 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.231672 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.231702 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.231742 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.231768 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:38Z","lastTransitionTime":"2026-01-28T16:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.289754 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 07:15:34.084613521 +0000 UTC Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.330027 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:38 crc kubenswrapper[4877]: E0128 16:35:38.330377 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.336641 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.336749 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.336788 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.336825 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.336854 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:38Z","lastTransitionTime":"2026-01-28T16:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
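The certificate_manager lines in this stretch show the kubelet-serving credential itself is still valid (it expires 2026-02-24), but the computed rotation deadline already lies in the past (2026-01-04 here, 2025-12-18 a second later), so the manager re-evaluates on every pass and redraws the deadline. In upstream client-go the deadline is drawn at a uniformly random point roughly 70-90% of the way through the certificate's validity window; a sketch of that computation under stated assumptions (the notAfter value is copied from the log; the notBefore is a hypothetical stand-in, since the log does not print it):

    import random
    from datetime import datetime, timedelta, timezone

    not_after = datetime(2026, 2, 24, 5, 53, 3, tzinfo=timezone.utc)  # from the log
    not_before = not_after - timedelta(days=365)                      # assumed issue date
    lifetime = not_after - not_before

    # Assumed jitter rule: deadline = notBefore + lifetime * U(0.7, 0.9)
    deadline = not_before + lifetime * random.uniform(0.7, 0.9)
    print("rotation deadline:", deadline)

With an assumed one-year lifetime ending 2026-02-24, every draw in that window falls before 2026-01-28, which is consistent with the already-elapsed deadlines logged here.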
Has your network provider started?"} Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.441406 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.441507 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.441526 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.441554 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.441571 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:38Z","lastTransitionTime":"2026-01-28T16:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.544641 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.544696 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.544710 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.544735 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.544749 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:38Z","lastTransitionTime":"2026-01-28T16:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.647549 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.647589 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.647601 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.647618 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.647630 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:38Z","lastTransitionTime":"2026-01-28T16:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.751210 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.751267 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.751284 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.751308 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.751325 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:38Z","lastTransitionTime":"2026-01-28T16:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.855324 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.855390 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.855399 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.855424 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.855437 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:38Z","lastTransitionTime":"2026-01-28T16:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.959003 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.959081 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.959100 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.959132 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:38 crc kubenswrapper[4877]: I0128 16:35:38.959153 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:38Z","lastTransitionTime":"2026-01-28T16:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.063193 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.063247 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.063263 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.063286 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.063299 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:39Z","lastTransitionTime":"2026-01-28T16:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.166255 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.166668 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.166773 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.166875 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.166966 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:39Z","lastTransitionTime":"2026-01-28T16:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.269559 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.269914 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.270004 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.270123 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.270215 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:39Z","lastTransitionTime":"2026-01-28T16:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.290875 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 10:48:44.3372011 +0000 UTC
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.330529 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.330653 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:35:39 crc kubenswrapper[4877]: E0128 16:35:39.330761 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.330901 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:35:39 crc kubenswrapper[4877]: E0128 16:35:39.331059 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 16:35:39 crc kubenswrapper[4877]: E0128 16:35:39.331175 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.373295 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.373684 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.373755 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.373835 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.373907 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:39Z","lastTransitionTime":"2026-01-28T16:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.476789 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.476900 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.476919 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.476948 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.476975 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:39Z","lastTransitionTime":"2026-01-28T16:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.562632 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs\") pod \"network-metrics-daemon-bh9bk\" (UID: \"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\") " pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:35:39 crc kubenswrapper[4877]: E0128 16:35:39.562951 4877 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 16:35:39 crc kubenswrapper[4877]: E0128 16:35:39.563148 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs podName:a6ea3417-5f04-4035-aaea-0dc5ad7d002d nodeName:}" failed. No retries permitted until 2026-01-28 16:35:43.563108446 +0000 UTC m=+47.121435364 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs") pod "network-metrics-daemon-bh9bk" (UID: "a6ea3417-5f04-4035-aaea-0dc5ad7d002d") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.579800 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.579873 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.579892 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.580416 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.580705 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:39Z","lastTransitionTime":"2026-01-28T16:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.683961 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.684041 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.684060 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.684759 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.684821 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:39Z","lastTransitionTime":"2026-01-28T16:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.787808 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.787853 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.787864 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.787882 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.787893 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:39Z","lastTransitionTime":"2026-01-28T16:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.890999 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.891050 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.891064 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.891086 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.891099 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:39Z","lastTransitionTime":"2026-01-28T16:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.994472 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.994557 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.994576 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.994604 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:39 crc kubenswrapper[4877]: I0128 16:35:39.994624 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:39Z","lastTransitionTime":"2026-01-28T16:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.097178 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.097261 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.097276 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.097299 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.097315 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:40Z","lastTransitionTime":"2026-01-28T16:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.200594 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.200674 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.200696 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.200724 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.200743 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:40Z","lastTransitionTime":"2026-01-28T16:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.291621 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 09:34:29.259901525 +0000 UTC Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.303698 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.303782 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.303807 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.303841 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.303864 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:40Z","lastTransitionTime":"2026-01-28T16:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.330324 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:40 crc kubenswrapper[4877]: E0128 16:35:40.330532 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.406904 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.406973 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.406988 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.407008 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.407025 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:40Z","lastTransitionTime":"2026-01-28T16:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.511085 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.511149 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.511169 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.511195 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.511217 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:40Z","lastTransitionTime":"2026-01-28T16:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.613949 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.614028 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.614048 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.614074 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.614099 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:40Z","lastTransitionTime":"2026-01-28T16:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.716784 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.716838 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.716850 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.716868 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.716878 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:40Z","lastTransitionTime":"2026-01-28T16:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.819935 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.820002 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.820024 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.820054 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.820075 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:40Z","lastTransitionTime":"2026-01-28T16:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.923377 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.923558 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.923596 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.923639 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:40 crc kubenswrapper[4877]: I0128 16:35:40.923667 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:40Z","lastTransitionTime":"2026-01-28T16:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.026127 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.026202 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.026220 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.026253 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.026272 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:41Z","lastTransitionTime":"2026-01-28T16:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.129626 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.129698 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.129718 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.129745 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.129766 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:41Z","lastTransitionTime":"2026-01-28T16:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.233281 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.233349 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.233369 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.233399 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.233425 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:41Z","lastTransitionTime":"2026-01-28T16:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.292663 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 12:40:02.308066887 +0000 UTC
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.330369 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.330466 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:35:41 crc kubenswrapper[4877]: E0128 16:35:41.330595 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d"
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.330628 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:35:41 crc kubenswrapper[4877]: E0128 16:35:41.330689 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 16:35:41 crc kubenswrapper[4877]: E0128 16:35:41.330887 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.336683 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.336741 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.336767 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.336801 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.336833 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:41Z","lastTransitionTime":"2026-01-28T16:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.442188 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.442229 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.442240 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.442257 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.442269 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:41Z","lastTransitionTime":"2026-01-28T16:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.544836 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.544899 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.544911 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.544932 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.544949 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:41Z","lastTransitionTime":"2026-01-28T16:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.648835 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.648892 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.648903 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.648926 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.648940 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:41Z","lastTransitionTime":"2026-01-28T16:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.752434 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.752503 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.752519 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.752544 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.752567 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:41Z","lastTransitionTime":"2026-01-28T16:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.856065 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.856126 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.856139 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.856159 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.856173 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:41Z","lastTransitionTime":"2026-01-28T16:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.959881 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.959937 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.960001 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.960025 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:41 crc kubenswrapper[4877]: I0128 16:35:41.960070 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:41Z","lastTransitionTime":"2026-01-28T16:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.064324 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.064376 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.064403 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.064422 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.064434 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:42Z","lastTransitionTime":"2026-01-28T16:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.166797 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.166842 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.166859 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.166882 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.166897 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:42Z","lastTransitionTime":"2026-01-28T16:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.270453 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.270547 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.270562 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.270595 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.270612 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:42Z","lastTransitionTime":"2026-01-28T16:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.293790 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 16:39:40.737942344 +0000 UTC
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.329501 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 16:35:42 crc kubenswrapper[4877]: E0128 16:35:42.329682 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.340543 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.341395 4877 scope.go:117] "RemoveContainer" containerID="4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4"
Jan 28 16:35:42 crc kubenswrapper[4877]: E0128 16:35:42.341591 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.355364 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:42Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.370151 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:42Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.373460 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.373524 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.373535 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.373555 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.373567 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:42Z","lastTransitionTime":"2026-01-28T16:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.381780 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:42Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.395723 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:42Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.408122 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:42Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.418652 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:42Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.432706 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:42Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.444356 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea4
0311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:42Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.459531 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:42Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.472686 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:42Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.476349 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.476393 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.476406 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.476425 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.476440 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:42Z","lastTransitionTime":"2026-01-28T16:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.481891 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:42Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.498923 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"message\\\":\\\"e, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8441, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8442, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8444, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0128 16:35:33.501619 6376 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501636 6376 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501630 6376 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 16:35:33.501651 6376 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-5gw27 in node \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:42Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.522704 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:42Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.540247 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:42Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.553301 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:42Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.564012 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:42Z is after 2025-08-24T17:21:41Z" Jan 28 
16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.579574 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.579961 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.580063 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.580166 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.580248 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:42Z","lastTransitionTime":"2026-01-28T16:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.683680 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.683739 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.683771 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.683794 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.683807 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:42Z","lastTransitionTime":"2026-01-28T16:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.787331 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.787710 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.787789 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.787861 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.787926 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:42Z","lastTransitionTime":"2026-01-28T16:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.891138 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.891200 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.891211 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.891227 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.891257 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:42Z","lastTransitionTime":"2026-01-28T16:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.994042 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.994115 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.994124 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.994140 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:42 crc kubenswrapper[4877]: I0128 16:35:42.994150 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:42Z","lastTransitionTime":"2026-01-28T16:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.097888 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.097959 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.097985 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.098013 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.098034 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:43Z","lastTransitionTime":"2026-01-28T16:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.200537 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.200593 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.200609 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.200631 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.200649 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:43Z","lastTransitionTime":"2026-01-28T16:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.294613 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 01:36:19.753467282 +0000 UTC
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.304415 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.304680 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.304723 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.304755 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.304776 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:43Z","lastTransitionTime":"2026-01-28T16:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.330681 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.330739 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.330677 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:35:43 crc kubenswrapper[4877]: E0128 16:35:43.330870 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 16:35:43 crc kubenswrapper[4877]: E0128 16:35:43.330937 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 16:35:43 crc kubenswrapper[4877]: E0128 16:35:43.331015 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.408615 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.408672 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.408685 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.408706 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.408720 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:43Z","lastTransitionTime":"2026-01-28T16:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.512374 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.512456 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.512489 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.512516 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.512672 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:43Z","lastTransitionTime":"2026-01-28T16:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.560775 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.560845 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.560859 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.560883 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.560898 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:43Z","lastTransitionTime":"2026-01-28T16:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:43 crc kubenswrapper[4877]: E0128 16:35:43.576583 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:43Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.581726 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.581775 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.581793 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.581811 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.581824 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:43Z","lastTransitionTime":"2026-01-28T16:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:43 crc kubenswrapper[4877]: E0128 16:35:43.595626 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:43Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.600453 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.600512 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.600529 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.600555 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.600612 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:43Z","lastTransitionTime":"2026-01-28T16:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.611054 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs\") pod \"network-metrics-daemon-bh9bk\" (UID: \"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\") " pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:35:43 crc kubenswrapper[4877]: E0128 16:35:43.611261 4877 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 16:35:43 crc kubenswrapper[4877]: E0128 16:35:43.611349 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs podName:a6ea3417-5f04-4035-aaea-0dc5ad7d002d nodeName:}" failed. No retries permitted until 2026-01-28 16:35:51.611326826 +0000 UTC m=+55.169653714 (durationBeforeRetry 8s). 
Jan 28 16:35:43 crc kubenswrapper[4877]: E0128 16:35:43.614322 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:43Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.619227 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.619299 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.619322 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.619351 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.619370 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:43Z","lastTransitionTime":"2026-01-28T16:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:43 crc kubenswrapper[4877]: E0128 16:35:43.634815 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:43Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.638457 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.638522 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.638538 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.638563 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.638574 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:43Z","lastTransitionTime":"2026-01-28T16:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:43 crc kubenswrapper[4877]: E0128 16:35:43.650272 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:43Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:43 crc kubenswrapper[4877]: E0128 16:35:43.650467 4877 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.652738 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.652793 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.652807 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.652832 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.652851 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:43Z","lastTransitionTime":"2026-01-28T16:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.755702 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.755795 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.755817 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.755848 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.755868 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:43Z","lastTransitionTime":"2026-01-28T16:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.859878 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.859954 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.859979 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.860014 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.860038 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:43Z","lastTransitionTime":"2026-01-28T16:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.964001 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.964078 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.964101 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.964133 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:43 crc kubenswrapper[4877]: I0128 16:35:43.964157 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:43Z","lastTransitionTime":"2026-01-28T16:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.067690 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.067766 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.067787 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.067820 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.067843 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:44Z","lastTransitionTime":"2026-01-28T16:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.171777 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.171863 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.171890 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.171919 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.171937 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:44Z","lastTransitionTime":"2026-01-28T16:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.275704 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.275778 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.275801 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.275837 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.275863 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:44Z","lastTransitionTime":"2026-01-28T16:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.295123 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 01:34:18.176016528 +0000 UTC Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.329929 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:44 crc kubenswrapper[4877]: E0128 16:35:44.330116 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.379035 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.379127 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.379152 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.379183 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.379207 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:44Z","lastTransitionTime":"2026-01-28T16:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.482545 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.482628 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.482647 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.482675 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.482694 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:44Z","lastTransitionTime":"2026-01-28T16:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.585937 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.585985 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.585995 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.586011 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.586024 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:44Z","lastTransitionTime":"2026-01-28T16:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.690189 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.690277 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.690295 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.690326 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.690351 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:44Z","lastTransitionTime":"2026-01-28T16:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.795080 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.795139 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.795149 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.795168 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.795180 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:44Z","lastTransitionTime":"2026-01-28T16:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.898637 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.898712 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.898735 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.898764 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:44 crc kubenswrapper[4877]: I0128 16:35:44.898790 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:44Z","lastTransitionTime":"2026-01-28T16:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.001809 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.002163 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.002249 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.002346 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.002536 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:45Z","lastTransitionTime":"2026-01-28T16:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.105454 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.105546 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.105560 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.105581 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.105597 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:45Z","lastTransitionTime":"2026-01-28T16:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.208607 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.208954 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.209035 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.209120 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.209186 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:45Z","lastTransitionTime":"2026-01-28T16:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.295986 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 07:36:27.040566795 +0000 UTC Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.312247 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.312326 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.312344 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.312374 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.312395 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:45Z","lastTransitionTime":"2026-01-28T16:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.330657 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.330657 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.330829 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:45 crc kubenswrapper[4877]: E0128 16:35:45.331021 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:35:45 crc kubenswrapper[4877]: E0128 16:35:45.331126 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:35:45 crc kubenswrapper[4877]: E0128 16:35:45.331349 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.415324 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.415379 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.415391 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.415415 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.415435 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:45Z","lastTransitionTime":"2026-01-28T16:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.518924 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.518964 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.518972 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.518988 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.518998 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:45Z","lastTransitionTime":"2026-01-28T16:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.622024 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.622074 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.622083 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.622102 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.622112 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:45Z","lastTransitionTime":"2026-01-28T16:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.724396 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.724438 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.724451 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.724534 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.724551 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:45Z","lastTransitionTime":"2026-01-28T16:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.828659 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.828867 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.828905 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.828940 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.828965 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:45Z","lastTransitionTime":"2026-01-28T16:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.932058 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.932103 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.932111 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.932128 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:45 crc kubenswrapper[4877]: I0128 16:35:45.932138 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:45Z","lastTransitionTime":"2026-01-28T16:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.039606 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.039726 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.039743 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.039762 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.039774 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:46Z","lastTransitionTime":"2026-01-28T16:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.143263 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.143341 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.143359 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.143386 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.143407 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:46Z","lastTransitionTime":"2026-01-28T16:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.245995 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.246036 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.246047 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.246064 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.246077 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:46Z","lastTransitionTime":"2026-01-28T16:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.296629 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 03:20:06.427461207 +0000 UTC Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.330229 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:46 crc kubenswrapper[4877]: E0128 16:35:46.330392 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.349970 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.350028 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.350045 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.350073 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:46 crc kubenswrapper[4877]: I0128 16:35:46.350091 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:46Z","lastTransitionTime":"2026-01-28T16:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.281307 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.281381 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.281393 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.281410 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.281425 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:47Z","lastTransitionTime":"2026-01-28T16:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.297362 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 12:48:30.621872769 +0000 UTC
Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.330465 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.330538 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.330620 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:35:47 crc kubenswrapper[4877]: E0128 16:35:47.330708 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d"
Jan 28 16:35:47 crc kubenswrapper[4877]: E0128 16:35:47.330828 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 16:35:47 crc kubenswrapper[4877]: E0128 16:35:47.331630 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.346676 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:47Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.360518 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:47Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.372798 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:47Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.384339 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:47Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.384767 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.384816 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.384825 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.384842 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.384855 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:47Z","lastTransitionTime":"2026-01-28T16:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.401963 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:47Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.416206 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:47Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.461810 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:47Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.486747 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea4
0311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:47Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.487027 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.487053 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.487063 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.487079 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.487092 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:47Z","lastTransitionTime":"2026-01-28T16:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.509095 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:47Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.520302 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:47Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.542741 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"message\\\":\\\"e, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8441, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8442, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8444, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0128 16:35:33.501619 6376 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501636 6376 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501630 6376 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 16:35:33.501651 6376 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-5gw27 in node \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:47Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.560085 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:47Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.574345 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:47Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.588562 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:47Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.589661 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.589721 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.589737 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.589756 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.589770 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:47Z","lastTransitionTime":"2026-01-28T16:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.604834 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:47Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.622315 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:47Z is after 2025-08-24T17:21:41Z" Jan 28 
Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.693384 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.693458 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.693542 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.693577 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.693608 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:47Z","lastTransitionTime":"2026-01-28T16:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.796623 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.796699 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.796720 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.796749 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.796770 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:47Z","lastTransitionTime":"2026-01-28T16:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.900365 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.900418 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.900428 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.900445 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:47 crc kubenswrapper[4877]: I0128 16:35:47.900456 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:47Z","lastTransitionTime":"2026-01-28T16:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
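The NodeNotReady heartbeats repeating above (roughly every 100 ms, per the timestamps) come from the runtime network readiness check: the node stays NotReady until a CNI configuration file appears in /etc/kubernetes/cni/net.d/, and the component that would write one, ovnkube-controller, is in CrashLoopBackOff earlier in this log. A rough Go approximation of that directory scan; the extension list is an assumption mirroring what libcni accepts, not a quote of the kubelet's exact code:

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

func main() {
    // Directory named in the log message.
    confDir := "/etc/kubernetes/cni/net.d"
    entries, err := os.ReadDir(confDir)
    if err != nil {
        fmt.Println("cannot read CNI conf dir:", err)
        return
    }
    var configs []string
    for _, e := range entries {
        if e.IsDir() {
            continue
        }
        // Assumed extension filter (.conf, .conflist, .json).
        switch filepath.Ext(e.Name()) {
        case ".conf", ".conflist", ".json":
            configs = append(configs, e.Name())
        }
    }
    if len(configs) == 0 {
        // The state reported here: NetworkPluginNotReady.
        fmt.Println("no CNI configuration file found; network not ready")
        return
    }
    fmt.Println("CNI configurations present:", configs)
}
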
Has your network provider started?"} Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.004021 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.004099 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.004124 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.004168 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.004193 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:48Z","lastTransitionTime":"2026-01-28T16:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.106919 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.106985 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.106998 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.107016 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.107029 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:48Z","lastTransitionTime":"2026-01-28T16:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.210090 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.210145 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.210160 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.210179 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.210193 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:48Z","lastTransitionTime":"2026-01-28T16:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
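With the same two signatures recurring on every status sync, tallying them is quicker than reading entry by entry. A small self-contained sketch that counts both failure modes in a saved copy of this log; the filename is illustrative:

package main

import (
    "bufio"
    "fmt"
    "os"
    "strings"
)

func main() {
    f, err := os.Open("kubelet.log") // illustrative path to a saved copy
    if err != nil {
        fmt.Println(err)
        return
    }
    defer f.Close()

    var notReady, webhookFail int
    sc := bufio.NewScanner(f)
    // Status-patch entries run to several kilobytes; enlarge the token
    // buffer well past the default so long lines are not rejected.
    sc.Buffer(make([]byte, 0, 1<<20), 1<<20)
    for sc.Scan() {
        line := sc.Text()
        if strings.Contains(line, "Node became not ready") {
            notReady++
        }
        if strings.Contains(line, "failed calling webhook") {
            webhookFail++
        }
    }
    if err := sc.Err(); err != nil {
        fmt.Println("scan error:", err)
    }
    fmt.Printf("NodeNotReady conditions: %d, webhook failures: %d\n", notReady, webhookFail)
}
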
Has your network provider started?"} Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.297571 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 00:45:02.782437189 +0000 UTC Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.312501 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.312571 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.312585 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.312603 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.312617 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:48Z","lastTransitionTime":"2026-01-28T16:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.329815 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:48 crc kubenswrapper[4877]: E0128 16:35:48.329951 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.415690 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.415745 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.415757 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.415779 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.415801 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:48Z","lastTransitionTime":"2026-01-28T16:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.518852 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.518945 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.518957 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.518973 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.518986 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:48Z","lastTransitionTime":"2026-01-28T16:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.622996 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.623072 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.623090 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.623117 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.623137 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:48Z","lastTransitionTime":"2026-01-28T16:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.728044 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.728110 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.728128 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.728156 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.728176 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:48Z","lastTransitionTime":"2026-01-28T16:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.831567 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.831641 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.831656 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.831681 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.831698 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:48Z","lastTransitionTime":"2026-01-28T16:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.916847 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.934382 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.936800 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.936973 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.936997 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.937022 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.937067 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:48Z","lastTransitionTime":"2026-01-28T16:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.941286 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:48Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.956161 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:48Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:48 crc kubenswrapper[4877]: I0128 16:35:48.976165 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:48Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.000814 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:48Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.019319 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:49Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.038051 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:49Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.040386 4877 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.040508 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.040539 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.040581 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.040611 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:49Z","lastTransitionTime":"2026-01-28T16:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.071600 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2
d30b5cec13eee90015c266f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"message\\\":\\\"e, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8441, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8442, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8444, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0128 16:35:33.501619 6376 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501636 6376 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501630 6376 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 16:35:33.501651 6376 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-5gw27 in node \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:49Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.084089 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:35:49 crc kubenswrapper[4877]: E0128 16:35:49.084412 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:36:21.084355158 +0000 UTC m=+84.642682056 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.084561 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.084621 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:49 crc kubenswrapper[4877]: E0128 16:35:49.084782 4877 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 16:35:49 crc kubenswrapper[4877]: E0128 16:35:49.084895 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 16:36:21.084870261 +0000 UTC m=+84.643197329 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 16:35:49 crc kubenswrapper[4877]: E0128 16:35:49.084791 4877 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 16:35:49 crc kubenswrapper[4877]: E0128 16:35:49.084978 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 16:36:21.084965424 +0000 UTC m=+84.643292322 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.091725 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:49Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.107516 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:49Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.124742 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:49Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.142086 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:49Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.143242 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.143311 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.143330 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.143355 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.143370 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:49Z","lastTransitionTime":"2026-01-28T16:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.158478 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:49Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.169951 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:49Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.181438 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:49Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.185180 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.185333 4877 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:49 crc kubenswrapper[4877]: E0128 16:35:49.185428 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 16:35:49 crc kubenswrapper[4877]: E0128 16:35:49.185557 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 16:35:49 crc kubenswrapper[4877]: E0128 16:35:49.185500 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 16:35:49 crc kubenswrapper[4877]: E0128 16:35:49.185579 4877 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 16:35:49 crc kubenswrapper[4877]: E0128 16:35:49.185589 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 16:35:49 crc kubenswrapper[4877]: E0128 16:35:49.185605 4877 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 16:35:49 crc kubenswrapper[4877]: E0128 16:35:49.185663 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 16:36:21.185636946 +0000 UTC m=+84.743964004 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 16:35:49 crc kubenswrapper[4877]: E0128 16:35:49.185686 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 16:36:21.185677717 +0000 UTC m=+84.744004825 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.197414 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287fa
af92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:49Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.209520 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:49Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.247246 4877 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.247326 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.247353 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.247394 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.247422 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:49Z","lastTransitionTime":"2026-01-28T16:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.298395 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 12:26:28.964124379 +0000 UTC Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.329907 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.329963 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.330017 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:49 crc kubenswrapper[4877]: E0128 16:35:49.330175 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:35:49 crc kubenswrapper[4877]: E0128 16:35:49.330352 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:35:49 crc kubenswrapper[4877]: E0128 16:35:49.330539 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.350860 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.350938 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.350962 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.350997 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.351022 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:49Z","lastTransitionTime":"2026-01-28T16:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.455359 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.455428 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.455445 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.455470 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.455523 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:49Z","lastTransitionTime":"2026-01-28T16:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.559611 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.559710 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.559736 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.559784 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.559812 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:49Z","lastTransitionTime":"2026-01-28T16:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.662989 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.663029 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.663041 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.663059 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.663072 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:49Z","lastTransitionTime":"2026-01-28T16:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.768459 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.768592 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.768632 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.768673 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.768699 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:49Z","lastTransitionTime":"2026-01-28T16:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.872466 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.872595 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.872618 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.872647 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.872666 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:49Z","lastTransitionTime":"2026-01-28T16:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.976012 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.976075 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.976089 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.976114 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:49 crc kubenswrapper[4877]: I0128 16:35:49.976130 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:49Z","lastTransitionTime":"2026-01-28T16:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.079032 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.079094 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.079106 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.079128 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.079141 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:50Z","lastTransitionTime":"2026-01-28T16:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.183137 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.183207 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.183219 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.183240 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.183257 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:50Z","lastTransitionTime":"2026-01-28T16:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.286302 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.286360 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.286375 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.286400 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.286417 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:50Z","lastTransitionTime":"2026-01-28T16:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.299009 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 03:27:43.18035641 +0000 UTC Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.330910 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:50 crc kubenswrapper[4877]: E0128 16:35:50.331174 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.389058 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.389124 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.389138 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.389159 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.389173 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:50Z","lastTransitionTime":"2026-01-28T16:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.491961 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.492025 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.492035 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.492055 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.492067 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:50Z","lastTransitionTime":"2026-01-28T16:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.596014 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.596114 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.596131 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.596161 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.596180 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:50Z","lastTransitionTime":"2026-01-28T16:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.699436 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.699535 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.699556 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.699582 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.699602 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:50Z","lastTransitionTime":"2026-01-28T16:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.803032 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.803081 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.803093 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.803124 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.803138 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:50Z","lastTransitionTime":"2026-01-28T16:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.906424 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.906509 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.906533 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.906560 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:50 crc kubenswrapper[4877]: I0128 16:35:50.906574 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:50Z","lastTransitionTime":"2026-01-28T16:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.009951 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.010029 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.010067 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.010099 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.010120 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:51Z","lastTransitionTime":"2026-01-28T16:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.113654 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.113730 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.113748 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.113776 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.113795 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:51Z","lastTransitionTime":"2026-01-28T16:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.217270 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.217325 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.217339 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.217360 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.217378 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:51Z","lastTransitionTime":"2026-01-28T16:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.299452 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 00:22:48.185213856 +0000 UTC Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.320618 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.320686 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.320705 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.320732 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.320752 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:51Z","lastTransitionTime":"2026-01-28T16:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.329975 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.330058 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.329975 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:51 crc kubenswrapper[4877]: E0128 16:35:51.330153 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:35:51 crc kubenswrapper[4877]: E0128 16:35:51.330255 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:35:51 crc kubenswrapper[4877]: E0128 16:35:51.330543 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.424056 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.424127 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.424144 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.424171 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.424190 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:51Z","lastTransitionTime":"2026-01-28T16:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.528428 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.528587 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.528613 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.528648 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.528677 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:51Z","lastTransitionTime":"2026-01-28T16:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.615116 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs\") pod \"network-metrics-daemon-bh9bk\" (UID: \"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\") " pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:35:51 crc kubenswrapper[4877]: E0128 16:35:51.615346 4877 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 16:35:51 crc kubenswrapper[4877]: E0128 16:35:51.615456 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs podName:a6ea3417-5f04-4035-aaea-0dc5ad7d002d nodeName:}" failed. No retries permitted until 2026-01-28 16:36:07.615427979 +0000 UTC m=+71.173754887 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs") pod "network-metrics-daemon-bh9bk" (UID: "a6ea3417-5f04-4035-aaea-0dc5ad7d002d") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.632241 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.632280 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.632293 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.632311 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.632322 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:51Z","lastTransitionTime":"2026-01-28T16:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.736997 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.737098 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.737116 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.737145 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.737163 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:51Z","lastTransitionTime":"2026-01-28T16:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.840572 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.840674 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.840706 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.840748 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.840779 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:51Z","lastTransitionTime":"2026-01-28T16:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.944201 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.944294 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.944314 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.944343 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:51 crc kubenswrapper[4877]: I0128 16:35:51.944364 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:51Z","lastTransitionTime":"2026-01-28T16:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.048126 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.048177 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.048188 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.048210 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.048224 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:52Z","lastTransitionTime":"2026-01-28T16:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.152627 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.152692 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.152712 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.152745 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.152771 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:52Z","lastTransitionTime":"2026-01-28T16:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.257876 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.257967 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.257985 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.258017 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.258036 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:52Z","lastTransitionTime":"2026-01-28T16:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.300081 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 00:23:26.626657785 +0000 UTC
Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.329860 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 16:35:52 crc kubenswrapper[4877]: E0128 16:35:52.330051 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
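The certificate_manager line above (and its twin a second later) reports a rotation deadline that is already in the past relative to the log's clock of 2026-01-28, so the kubelet considers its serving certificate due for rotation; the deadline appears to be re-jittered each time it is computed, which would explain the two different past dates. A small Go sketch of the basic validity check a TLS client performs, assuming a hypothetical local PEM path; the same check is what fails later in this log when the network-node-identity webhook presents a certificate that expired on 2025-08-24:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	// The path is an assumption for illustration; point it at any PEM certificate.
	data, err := os.ReadFile("/path/to/tls.crt")
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("NotBefore=%s NotAfter=%s\n", cert.NotBefore, cert.NotAfter)
	if time.Now().After(cert.NotAfter) {
		// The state the webhook certificate below is in:
		// "certificate has expired or is not yet valid".
		fmt.Println("certificate has expired")
	}
}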
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.361545 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.361611 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.361623 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.361645 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.361662 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:52Z","lastTransitionTime":"2026-01-28T16:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.465387 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.465833 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.465963 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.466119 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.466247 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:52Z","lastTransitionTime":"2026-01-28T16:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.569164 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.569229 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.569243 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.569269 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.569285 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:52Z","lastTransitionTime":"2026-01-28T16:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.673077 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.673152 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.673177 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.673216 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.673241 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:52Z","lastTransitionTime":"2026-01-28T16:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.778219 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.778282 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.778293 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.778311 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.778324 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:52Z","lastTransitionTime":"2026-01-28T16:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.881963 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.882032 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.882055 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.882089 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.882113 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:52Z","lastTransitionTime":"2026-01-28T16:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.985680 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.985754 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.985773 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.985802 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:52 crc kubenswrapper[4877]: I0128 16:35:52.985824 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:52Z","lastTransitionTime":"2026-01-28T16:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.088777 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.088851 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.088870 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.088893 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.088909 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:53Z","lastTransitionTime":"2026-01-28T16:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.192843 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.193046 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.193131 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.193657 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.193779 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:53Z","lastTransitionTime":"2026-01-28T16:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.297131 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.297214 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.297239 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.297274 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.297299 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:53Z","lastTransitionTime":"2026-01-28T16:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.301530 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 04:29:38.027261366 +0000 UTC Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.330415 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.330538 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.330610 4877 util.go:30] "No sandbox for pod can be found. 
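Every record in this log shares one shape: a syslog prefix, a klog header (severity and date fused as I0128/E0128, time, PID, source file:line), then a quoted message followed by key="value" pairs. A minimal Go sketch that pulls those quoted fields out of a line for triage; the regular expression is a deliberate simplification that assumes values contain no embedded double quotes:

package main

import (
	"fmt"
	"regexp"
)

// Matches klog structured fields such as pod="openshift-multus/network-metrics-daemon-bh9bk".
// Simplifying assumption: field values never contain an unescaped double quote.
var field = regexp.MustCompile(`(\w+)="([^"]*)"`)

func main() {
	line := `Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.330415 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk"`
	for _, m := range field.FindAllStringSubmatch(line, -1) {
		fmt.Printf("%s = %s\n", m[1], m[2])
	}
}

Run against the "Error syncing pod" records below, this pulls out err, pod, and podUID in one pass, which makes it easy to tabulate which pods are blocked on the missing CNI configuration.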
Jan 28 16:35:53 crc kubenswrapper[4877]: E0128 16:35:53.331355 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d"
Jan 28 16:35:53 crc kubenswrapper[4877]: E0128 16:35:53.331661 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 16:35:53 crc kubenswrapper[4877]: E0128 16:35:53.331539 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.400269 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.400335 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.400354 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.400383 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.400404 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:53Z","lastTransitionTime":"2026-01-28T16:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.503955 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.504379 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.504595 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.504719 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.504814 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:53Z","lastTransitionTime":"2026-01-28T16:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.608848 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.609232 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.609382 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.609571 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.609718 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:53Z","lastTransitionTime":"2026-01-28T16:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.668561 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.669046 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.669222 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.669371 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.669542 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:53Z","lastTransitionTime":"2026-01-28T16:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:53 crc kubenswrapper[4877]: E0128 16:35:53.693652 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:53Z is after 
2025-08-24T17:21:41Z" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.699889 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.699942 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.699959 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.699986 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.700005 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:53Z","lastTransitionTime":"2026-01-28T16:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:53 crc kubenswrapper[4877]: E0128 16:35:53.721215 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:53Z is after 
2025-08-24T17:21:41Z" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.727433 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.727517 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.727532 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.727558 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.727574 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:53Z","lastTransitionTime":"2026-01-28T16:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:53 crc kubenswrapper[4877]: E0128 16:35:53.745970 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:53Z is after 
2025-08-24T17:21:41Z"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.751478 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.751589 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.751609 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.751640 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.751659 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:53Z","lastTransitionTime":"2026-01-28T16:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:53 crc kubenswrapper[4877]: E0128 16:35:53.772063 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{[... status payload identical to the first attempt above ...]}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:53Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.777111 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.777147 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.777159 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.777181 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.777197 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:53Z","lastTransitionTime":"2026-01-28T16:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:53 crc kubenswrapper[4877]: E0128 16:35:53.793408 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{[... status payload identical to the first attempt above ...]}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:53Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:53 crc kubenswrapper[4877]: E0128 16:35:53.793591 4877 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.795838 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.795869 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.795883 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.795902 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.795914 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:53Z","lastTransitionTime":"2026-01-28T16:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.900212 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.900292 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.900311 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.900342 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:53 crc kubenswrapper[4877]: I0128 16:35:53.900362 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:53Z","lastTransitionTime":"2026-01-28T16:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
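All three failed patch attempts above stop at the same place: the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 presents a serving certificate whose notAfter is 2025-08-24T17:21:41Z, while the node clock reads 2026-01-28. A minimal Go sketch of a standalone diagnostic (hypothetical, not part of kubelet; only the address and the dates are taken from the log) that reads the certificate's validity window instead of failing the handshake the way kubelet's verifying client does:

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Webhook endpoint taken from the failing Post in the log records above.
	addr := "127.0.0.1:9743"
	// InsecureSkipVerify is deliberate: it lets the handshake complete even
	// with an expired certificate, so the validity window can be inspected
	// instead of surfacing only as an x509 verification error.
	conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	now := time.Now().UTC()
	fmt.Println("notBefore:", cert.NotBefore.UTC().Format(time.RFC3339))
	fmt.Println("notAfter: ", cert.NotAfter.UTC().Format(time.RFC3339))
	if now.After(cert.NotAfter) {
		// Mirrors the kubelet message: "current time ... is after ...".
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	}
}

Run against the live endpoint, this prints the same notAfter the x509 error quotes, which separates a genuinely expired webhook certificate from a skewed node clock.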
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.003467 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.003520 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.003529 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.003545 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.003555 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:54Z","lastTransitionTime":"2026-01-28T16:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.107643 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.107720 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.107744 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.107778 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.107802 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:54Z","lastTransitionTime":"2026-01-28T16:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.211151 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.211218 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.211237 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.211265 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.211285 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:54Z","lastTransitionTime":"2026-01-28T16:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.302082 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 21:23:38.80473888 +0000 UTC
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.315106 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.315165 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.315177 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.315195 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.315210 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:54Z","lastTransitionTime":"2026-01-28T16:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.330260 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 16:35:54 crc kubenswrapper[4877]: E0128 16:35:54.330793 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.331576 4877 scope.go:117] "RemoveContainer" containerID="4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.418433 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.418895 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.418926 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.418943 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.418954 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:54Z","lastTransitionTime":"2026-01-28T16:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
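The certificate_manager record above explains why kubelet-serving rotation is already overdue even though that certificate is valid until 2026-02-24: client-go's certificate manager schedules rotation at a jittered point inside the certificate's lifetime, so the rotation deadline (2025-12-18) lands well before notAfter. A sketch under that assumption (deadline = notBefore + 70-90% of lifetime, matching the upstream k8s.io/client-go/util/certificate behavior; the notBefore below is hypothetical, since the log does not print it):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline mirrors the jitter used by client-go's certificate
// manager: a uniformly random point between 70% and 90% of the
// certificate's lifetime, measured from notBefore.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	lifetime := notAfter.Sub(notBefore)
	jitter := time.Duration(float64(lifetime) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jitter)
}

func main() {
	// notAfter comes from the certificate_manager record above; notBefore is
	// a hypothetical one-year-earlier stand-in.
	notAfter := time.Date(2026, time.February, 24, 5, 53, 3, 0, time.UTC)
	notBefore := notAfter.AddDate(-1, 0, 0)
	fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
}

With a deadline in the past relative to the log's clock, the manager attempts rotation immediately, which is why the expiration line keeps reappearing on every status sync.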
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.521802 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.521848 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.521861 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.521879 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.521892 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:54Z","lastTransitionTime":"2026-01-28T16:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.624787 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.624846 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.624856 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.624875 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.624886 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:54Z","lastTransitionTime":"2026-01-28T16:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.728630 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.728703 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.728723 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.728750 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.728769 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:54Z","lastTransitionTime":"2026-01-28T16:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
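Every "Node became not ready" record above carries the same root cause: the runtime reports NetworkReady=false because /etc/kubernetes/cni/net.d/ holds no CNI network configuration yet, which is expected while ovnkube-node is still restarting its controller. A minimal sketch of that readiness test (an assumption mirroring how libcni discovers config files; the extension list and exit code below are illustrative, not kubelet's exact code path):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// hasCNIConfig reports whether dir holds at least one CNI network config.
// The extensions mirror what libcni loads; treat them as an assumption.
func hasCNIConfig(dir string) bool {
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(dir, pattern))
		if err == nil && len(matches) > 0 {
			return true
		}
	}
	return false
}

func main() {
	dir := "/etc/kubernetes/cni/net.d" // directory named in the log records
	if !hasCNIConfig(dir) {
		fmt.Println("NetworkReady=false: no CNI configuration file in", dir)
		os.Exit(1)
	}
	fmt.Println("NetworkReady=true")
}

Once the OVN-Kubernetes controller comes back and writes its config into that directory, the same check flips and the Ready condition clears.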
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.760040 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovnkube-controller/1.log"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.764371 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerStarted","Data":"46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288"}
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.765274 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.786883 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:54Z is after 2025-08-24T17:21:41Z"
Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.821963 4877 status_manager.go:875] "Failed to update status for pod"
pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"message\\\":\\\"e, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8441, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8442, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8444, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0128 16:35:33.501619 6376 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501636 6376 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501630 6376 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 16:35:33.501651 6376 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-5gw27 in node 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:54Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.831553 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.831875 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.831968 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.832107 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.832212 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:54Z","lastTransitionTime":"2026-01-28T16:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.850063 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:54Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.873762 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:54Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.890159 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:54Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.905367 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:54Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.918853 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:54Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.929932 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:54Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.935019 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.935083 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.935097 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.935122 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.935138 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:54Z","lastTransitionTime":"2026-01-28T16:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.943294 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:54Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.960099 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:54Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.974247 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:54Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:54 crc kubenswrapper[4877]: I0128 16:35:54.992120 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:54Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.004320 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.018174 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.033233 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.037450 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.037508 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.037521 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.037543 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.037558 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:55Z","lastTransitionTime":"2026-01-28T16:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.045610 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"911ece97-f301-4880-b308-d23e6f42071b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d24a6a68dfc0269608134b70620f1fc2ed73ea509b8f0152948ec8961679bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://617e761209376efc1375de420ec95948c4d78d70a0b577357ad157c85d23cab0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eebc783d401483d227acf6a4fa2854c410d9e195bc3cf1a968fb0eec540187d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.064799 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-
api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.140759 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.140823 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.140836 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.140861 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.140875 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:55Z","lastTransitionTime":"2026-01-28T16:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.243738 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.243808 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.243818 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.243840 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.243900 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:55Z","lastTransitionTime":"2026-01-28T16:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.303068 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 18:56:26.658115821 +0000 UTC Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.330524 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.330631 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:55 crc kubenswrapper[4877]: E0128 16:35:55.330710 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:35:55 crc kubenswrapper[4877]: E0128 16:35:55.330851 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.330947 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:55 crc kubenswrapper[4877]: E0128 16:35:55.331036 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.346973 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.347131 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.347227 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.347307 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.347379 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:55Z","lastTransitionTime":"2026-01-28T16:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.450939 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.451032 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.451058 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.451113 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.451152 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:55Z","lastTransitionTime":"2026-01-28T16:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.553911 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.553977 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.553996 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.554025 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.554044 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:55Z","lastTransitionTime":"2026-01-28T16:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.657394 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.657653 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.657697 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.657732 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.657752 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:55Z","lastTransitionTime":"2026-01-28T16:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.761328 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.761412 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.761437 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.761514 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.761539 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:55Z","lastTransitionTime":"2026-01-28T16:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.769747 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovnkube-controller/2.log" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.771057 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovnkube-controller/1.log" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.778033 4877 generic.go:334] "Generic (PLEG): container finished" podID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerID="46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288" exitCode=1 Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.778092 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerDied","Data":"46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288"} Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.778136 4877 scope.go:117] "RemoveContainer" containerID="4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.779051 4877 scope.go:117] "RemoveContainer" containerID="46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288" Jan 28 16:35:55 crc kubenswrapper[4877]: E0128 16:35:55.779265 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.799176 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.817632 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"911ece97-f301-4880-b308-d23e6f42071b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d24a6a68dfc0269608134b70620f1fc2ed73ea509b8f0152948ec8961679bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://617e761209376efc1375de420ec95948c4d78d70a0b577357ad157c85d23cab0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eebc783d401483d227acf6a4fa2854c410d9e195bc3cf1a968fb0eec540187d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.831067 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.853852 4877 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.863685 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.863730 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:55 
crc kubenswrapper[4877]: I0128 16:35:55.863743 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.863876 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.863916 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:55Z","lastTransitionTime":"2026-01-28T16:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.867975 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.897577 4877 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32
fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f560a1ade86b573d51bdfc777f5bd2e1f6cc3b2d30b5cec13eee90015c266f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"message\\\":\\\"e, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8441, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8442, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.167\\\\\\\", Port:8444, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0128 16:35:33.501619 6376 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501636 6376 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-node-5gw27\\\\nI0128 16:35:33.501630 6376 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 16:35:33.501651 6376 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-5gw27 in node 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:55Z\\\",\\\"message\\\":\\\"55.306529 6597 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI0128 16:35:55.306460 6597 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI0128 16:35:55.306541 6597 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nF0128 16:35:55.306541 6597 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z]\\\\nI0128 16:35:55.306553 6597 default_network_controller.go:776] Recording success event on 
p\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.915908 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluste
r-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.939240 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.959631 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.966890 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.966946 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.966966 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.966997 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.967018 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:55Z","lastTransitionTime":"2026-01-28T16:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:55 crc kubenswrapper[4877]: I0128 16:35:55.982518 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.004670 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 
16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.030573 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.059754 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.070739 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.070808 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:56 crc 
kubenswrapper[4877]: I0128 16:35:56.070826 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.070854 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.070873 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:56Z","lastTransitionTime":"2026-01-28T16:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.078778 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.098547 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.115914 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.129896 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.174810 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.174862 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.174879 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.174902 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.174922 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:56Z","lastTransitionTime":"2026-01-28T16:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.278670 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.278719 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.278732 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.278750 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.278763 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:56Z","lastTransitionTime":"2026-01-28T16:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.303658 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 15:22:00.585257833 +0000 UTC Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.330355 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:56 crc kubenswrapper[4877]: E0128 16:35:56.330673 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.382312 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.382399 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.382424 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.382464 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.382527 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:56Z","lastTransitionTime":"2026-01-28T16:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.485856 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.485919 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.485932 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.485953 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.485968 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:56Z","lastTransitionTime":"2026-01-28T16:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.588906 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.588965 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.588975 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.588995 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.589009 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:56Z","lastTransitionTime":"2026-01-28T16:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.692344 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.692410 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.692426 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.692450 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.692470 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:56Z","lastTransitionTime":"2026-01-28T16:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.784545 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovnkube-controller/2.log" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.788042 4877 scope.go:117] "RemoveContainer" containerID="46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288" Jan 28 16:35:56 crc kubenswrapper[4877]: E0128 16:35:56.788217 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.795752 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.795837 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.795855 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.795876 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.795892 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:56Z","lastTransitionTime":"2026-01-28T16:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.808109 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.832378 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.847304 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.864246 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imag
eID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.879817 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"911ece97-f301-4880-b308-d23e6f42071b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d24a6a68dfc0269608134b70620f1fc2ed73ea509b8f0152948ec8961679bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://617e761209376efc1375de420ec95948c4d78d70a0b577357ad157c85d23cab0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eebc783d401483d227acf6a4fa2854c410d9e195bc3cf1a968fb0eec540187d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.892651 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.901317 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.901391 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.901408 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.901430 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.901450 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:56Z","lastTransitionTime":"2026-01-28T16:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.917629 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.940217 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.963578 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:55Z\\\",\\\"message\\\":\\\"55.306529 6597 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI0128 16:35:55.306460 6597 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI0128 16:35:55.306541 6597 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nF0128 16:35:55.306541 6597 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z]\\\\nI0128 16:35:55.306553 6597 default_network_controller.go:776] Recording success event on p\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.984665 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:56 crc kubenswrapper[4877]: I0128 16:35:56.999668 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:56Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.004205 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.004257 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.004269 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.004295 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.004312 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:57Z","lastTransitionTime":"2026-01-28T16:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.013467 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.027948 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.041362 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 
16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.059248 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.079288 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.094781 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.107249 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.107316 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.107334 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.107356 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.107371 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:57Z","lastTransitionTime":"2026-01-28T16:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.211282 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.211350 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.211363 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.211405 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.211420 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:57Z","lastTransitionTime":"2026-01-28T16:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.303974 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 19:33:31.979445163 +0000 UTC Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.313874 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.313947 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.313965 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.314040 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.314063 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:57Z","lastTransitionTime":"2026-01-28T16:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.330634 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.330634 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:35:57 crc kubenswrapper[4877]: E0128 16:35:57.331025 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.331280 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:57 crc kubenswrapper[4877]: E0128 16:35:57.331369 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:35:57 crc kubenswrapper[4877]: E0128 16:35:57.331468 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.366814 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\
":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209948
2919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:55Z\\\",\\\"message\\\":\\\"55.306529 6597 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI0128 16:35:55.306460 6597 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI0128 16:35:55.306541 6597 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nF0128 16:35:55.306541 6597 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z]\\\\nI0128 16:35:55.306553 6597 default_network_controller.go:776] Recording success event on p\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting 
failed container=ovnkube-controller pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.392258 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473
135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.411940 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.417891 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.417977 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.418003 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.418057 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.418083 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:57Z","lastTransitionTime":"2026-01-28T16:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.429524 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.449840 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.468789 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.482973 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.496189 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 
16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.511833 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.520802 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.520854 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.520867 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.520894 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.520911 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:57Z","lastTransitionTime":"2026-01-28T16:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.531362 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.546974 4877 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.568865 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.589575 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.606207 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.624245 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.624312 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.624330 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:57 crc 
kubenswrapper[4877]: I0128 16:35:57.624358 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.624378 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:57Z","lastTransitionTime":"2026-01-28T16:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.625179 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.642005 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"911ece97-f301-4880-b308-d23e6f42071b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d24a6a68dfc0269608134b70620f1fc2ed73ea509b8f0152948ec8961679bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://617e761209376efc1375de420ec95948c4d78d70a0b577357ad157c85d23cab0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eebc783d401483d227acf6a4fa2854c410d9e195bc3cf1a968fb0eec540187d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.657262 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:57Z is after 2025-08-24T17:21:41Z" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.727677 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.727740 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.727753 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.727777 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.727792 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:57Z","lastTransitionTime":"2026-01-28T16:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.831112 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.831182 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.831197 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.831218 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.831233 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:57Z","lastTransitionTime":"2026-01-28T16:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.934894 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.934979 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.934998 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.935029 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:57 crc kubenswrapper[4877]: I0128 16:35:57.935051 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:57Z","lastTransitionTime":"2026-01-28T16:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.038648 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.038700 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.038713 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.038734 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.038746 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:58Z","lastTransitionTime":"2026-01-28T16:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.142432 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.142849 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.142859 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.142877 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.142890 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:58Z","lastTransitionTime":"2026-01-28T16:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.245347 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.245408 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.245421 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.245444 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.245457 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:58Z","lastTransitionTime":"2026-01-28T16:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.304929 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 17:04:14.797473481 +0000 UTC Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.329838 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:35:58 crc kubenswrapper[4877]: E0128 16:35:58.330058 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.349205 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.349280 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.349299 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.349330 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.349357 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:58Z","lastTransitionTime":"2026-01-28T16:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.452446 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.452546 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.452564 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.452591 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.452609 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:58Z","lastTransitionTime":"2026-01-28T16:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.556644 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.556720 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.556739 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.556768 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.556792 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:58Z","lastTransitionTime":"2026-01-28T16:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.660426 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.660523 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.660538 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.660565 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.660582 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:58Z","lastTransitionTime":"2026-01-28T16:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.763315 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.763394 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.763408 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.763437 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.763452 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:58Z","lastTransitionTime":"2026-01-28T16:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.866160 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.866216 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.866229 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.866253 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.866269 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:58Z","lastTransitionTime":"2026-01-28T16:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.968770 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.968835 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.968852 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.968876 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:58 crc kubenswrapper[4877]: I0128 16:35:58.968891 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:58Z","lastTransitionTime":"2026-01-28T16:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.072547 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.072625 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.072647 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.072677 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.072698 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:59Z","lastTransitionTime":"2026-01-28T16:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.176098 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.176156 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.176168 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.176190 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.176204 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:59Z","lastTransitionTime":"2026-01-28T16:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.278998 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.279075 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.279089 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.279115 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.279131 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:59Z","lastTransitionTime":"2026-01-28T16:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.305247 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 08:29:48.129690153 +0000 UTC Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.330195 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.330406 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.330661 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:35:59 crc kubenswrapper[4877]: E0128 16:35:59.330642 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:35:59 crc kubenswrapper[4877]: E0128 16:35:59.330943 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:35:59 crc kubenswrapper[4877]: E0128 16:35:59.331077 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.382216 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.382275 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.382288 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.382310 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.382329 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:59Z","lastTransitionTime":"2026-01-28T16:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.485583 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.485664 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.485683 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.485713 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.485732 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:59Z","lastTransitionTime":"2026-01-28T16:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.588779 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.588836 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.588853 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.588875 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.588891 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:59Z","lastTransitionTime":"2026-01-28T16:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.692066 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.692130 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.692146 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.692172 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.692188 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:59Z","lastTransitionTime":"2026-01-28T16:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.795207 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.795266 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.795274 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.795294 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.795305 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:59Z","lastTransitionTime":"2026-01-28T16:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.898950 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.899035 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.899077 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.899130 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:35:59 crc kubenswrapper[4877]: I0128 16:35:59.899156 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:35:59Z","lastTransitionTime":"2026-01-28T16:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.003106 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.003179 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.003196 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.003225 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.003246 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:00Z","lastTransitionTime":"2026-01-28T16:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.106951 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.107060 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.107078 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.107106 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.107124 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:00Z","lastTransitionTime":"2026-01-28T16:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.210333 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.210417 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.210434 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.210466 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.210535 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:00Z","lastTransitionTime":"2026-01-28T16:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.306343 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 08:38:16.440474686 +0000 UTC Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.313827 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.313902 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.313916 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.313939 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.313960 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:00Z","lastTransitionTime":"2026-01-28T16:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.330567 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:00 crc kubenswrapper[4877]: E0128 16:36:00.330782 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.416942 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.416981 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.416990 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.417006 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.417014 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:00Z","lastTransitionTime":"2026-01-28T16:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.519835 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.519906 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.519925 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.519954 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.519974 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:00Z","lastTransitionTime":"2026-01-28T16:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.623849 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.623914 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.623939 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.623968 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.623985 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:00Z","lastTransitionTime":"2026-01-28T16:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.727664 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.727738 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.727764 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.727800 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.727826 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:00Z","lastTransitionTime":"2026-01-28T16:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.830523 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.830574 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.830583 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.830599 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.830639 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:00Z","lastTransitionTime":"2026-01-28T16:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.934036 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.934099 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.934115 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.934141 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:00 crc kubenswrapper[4877]: I0128 16:36:00.934161 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:00Z","lastTransitionTime":"2026-01-28T16:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.037426 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.037521 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.037535 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.037558 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.037575 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:01Z","lastTransitionTime":"2026-01-28T16:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.140364 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.140422 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.140434 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.140455 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.140468 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:01Z","lastTransitionTime":"2026-01-28T16:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.242710 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.242772 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.242787 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.242810 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.242826 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:01Z","lastTransitionTime":"2026-01-28T16:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.307033 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 13:23:25.521758432 +0000 UTC Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.329598 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.329610 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:01 crc kubenswrapper[4877]: E0128 16:36:01.329807 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.329916 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:01 crc kubenswrapper[4877]: E0128 16:36:01.330066 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:01 crc kubenswrapper[4877]: E0128 16:36:01.330259 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.345576 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.345632 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.345652 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.345676 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.345696 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:01Z","lastTransitionTime":"2026-01-28T16:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.449066 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.449150 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.449164 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.449188 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.449202 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:01Z","lastTransitionTime":"2026-01-28T16:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.551719 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.551782 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.551794 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.551816 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.551832 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:01Z","lastTransitionTime":"2026-01-28T16:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.655803 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.655876 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.655888 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.655912 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.655925 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:01Z","lastTransitionTime":"2026-01-28T16:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.759014 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.759091 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.759113 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.759143 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.759163 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:01Z","lastTransitionTime":"2026-01-28T16:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.862221 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.862280 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.862296 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.862356 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.862371 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:01Z","lastTransitionTime":"2026-01-28T16:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.965641 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.965688 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.965697 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.965728 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:01 crc kubenswrapper[4877]: I0128 16:36:01.965740 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:01Z","lastTransitionTime":"2026-01-28T16:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.069157 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.069239 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.069262 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.069296 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.069319 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:02Z","lastTransitionTime":"2026-01-28T16:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.172328 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.172384 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.172395 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.172421 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.172434 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:02Z","lastTransitionTime":"2026-01-28T16:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.275306 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.275387 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.275401 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.275425 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.275440 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:02Z","lastTransitionTime":"2026-01-28T16:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.308088 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 11:51:25.37024393 +0000 UTC Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.329578 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:02 crc kubenswrapper[4877]: E0128 16:36:02.329834 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.378944 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.379004 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.379015 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.379031 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.379045 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:02Z","lastTransitionTime":"2026-01-28T16:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.481855 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.481907 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.481921 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.481942 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.481953 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:02Z","lastTransitionTime":"2026-01-28T16:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.584906 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.584969 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.584983 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.584998 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.585009 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:02Z","lastTransitionTime":"2026-01-28T16:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.687815 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.687873 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.687885 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.687903 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.687916 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:02Z","lastTransitionTime":"2026-01-28T16:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.791181 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.791243 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.791257 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.791276 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.791290 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:02Z","lastTransitionTime":"2026-01-28T16:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.893382 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.893537 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.893550 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.893570 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.893582 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:02Z","lastTransitionTime":"2026-01-28T16:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.996328 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.996394 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.996414 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.996441 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:02 crc kubenswrapper[4877]: I0128 16:36:02.996459 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:02Z","lastTransitionTime":"2026-01-28T16:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.099018 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.099069 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.099081 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.099100 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.099113 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:03Z","lastTransitionTime":"2026-01-28T16:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.202368 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.202436 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.202457 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.202508 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.202526 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:03Z","lastTransitionTime":"2026-01-28T16:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.305898 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.305967 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.305980 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.306006 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.306029 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:03Z","lastTransitionTime":"2026-01-28T16:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.309275 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 12:04:06.085474978 +0000 UTC Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.329540 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.329571 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.329542 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:03 crc kubenswrapper[4877]: E0128 16:36:03.329740 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:03 crc kubenswrapper[4877]: E0128 16:36:03.329882 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:03 crc kubenswrapper[4877]: E0128 16:36:03.329999 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.410274 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.410341 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.410355 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.410375 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.410395 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:03Z","lastTransitionTime":"2026-01-28T16:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.513535 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.513612 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.513631 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.513656 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.513674 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:03Z","lastTransitionTime":"2026-01-28T16:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.616216 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.616294 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.616314 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.616347 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.616376 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:03Z","lastTransitionTime":"2026-01-28T16:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.719757 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.719811 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.719823 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.719844 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.719858 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:03Z","lastTransitionTime":"2026-01-28T16:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.824095 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.824153 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.824165 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.824186 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.824199 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:03Z","lastTransitionTime":"2026-01-28T16:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.917498 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.917555 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.917569 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.917591 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.917608 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:03Z","lastTransitionTime":"2026-01-28T16:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:03 crc kubenswrapper[4877]: E0128 16:36:03.932263 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:03Z is after 
2025-08-24T17:21:41Z" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.936493 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.936532 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.936542 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.936559 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.936569 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:03Z","lastTransitionTime":"2026-01-28T16:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:03 crc kubenswrapper[4877]: E0128 16:36:03.957308 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:03Z is after 
2025-08-24T17:21:41Z" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.962054 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.962129 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.962151 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.962183 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.962203 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:03Z","lastTransitionTime":"2026-01-28T16:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:03 crc kubenswrapper[4877]: E0128 16:36:03.978308 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:03Z is after 
2025-08-24T17:21:41Z" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.983287 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.983346 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.983368 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.983395 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:03 crc kubenswrapper[4877]: I0128 16:36:03.983416 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:03Z","lastTransitionTime":"2026-01-28T16:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
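The failed patch above, and each retry that follows, is rejected for the same reason: the serving certificate of the node.network-node-identity.openshift.io webhook expired at 2025-08-24T17:21:41Z, while the node clock reads 2026-01-28. A minimal Go sketch to confirm this from the node itself (assumptions: it runs locally on "crc" and the 127.0.0.1:9743 endpoint named in the Post error is listening; certificate verification is deliberately disabled so the expired certificate can be inspected rather than trusted):

package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

func main() {
	// Dial the webhook endpoint from the Post error above; skip verification
	// because the point is to read the expired certificate, not to trust it.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Println("NotBefore:", cert.NotBefore)
	fmt.Println("NotAfter: ", cert.NotAfter) // expect 2025-08-24T17:21:41Z per the log
}

If NotAfter matches the 2025-08-24T17:21:41Z in the error, the retries below cannot succeed until that certificate is rotated: the x509 failure is deterministic, not transient.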
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:03Z is after 
2025-08-24T17:21:41Z" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.005948 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.005997 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.006024 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.006042 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.006053 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:04Z","lastTransitionTime":"2026-01-28T16:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:04 crc kubenswrapper[4877]: E0128 16:36:04.021112 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:04Z is after 
2025-08-24T17:21:41Z" Jan 28 16:36:04 crc kubenswrapper[4877]: E0128 16:36:04.021271 4877 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.023277 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.023321 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.023332 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.023347 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.023357 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:04Z","lastTransitionTime":"2026-01-28T16:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.125959 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.126083 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.126096 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.126113 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.126125 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:04Z","lastTransitionTime":"2026-01-28T16:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
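The exhausted retries above explain why the node object never receives its status update; the NotReady condition itself comes from the other fault these records repeat: no CNI configuration. A small Go sketch (the path is taken from the kubelet message; assumed to run on the node) to see what, if anything, the network plugin has written:

package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	// Directory named in the kubelet's NetworkPluginNotReady message.
	const dir = "/etc/kubernetes/cni/net.d"

	entries, err := os.ReadDir(dir)
	if err != nil {
		log.Fatalf("cannot read %s: %v", dir, err)
	}
	if len(entries) == 0 {
		fmt.Printf("%s is empty: the runtime has no CNI config to load\n", dir)
		return
	}
	for _, e := range entries {
		fmt.Println(e.Name())
	}
}

An empty directory is consistent with every NetworkPluginNotReady line here: until the network provider writes its config there, sandbox creation and the node's Ready condition keep failing with the same message, as the pod sync errors below show.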
Has your network provider started?"} Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.228744 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.228818 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.228837 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.228866 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.228884 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:04Z","lastTransitionTime":"2026-01-28T16:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.309962 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 17:34:51.294819231 +0000 UTC Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.329540 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:04 crc kubenswrapper[4877]: E0128 16:36:04.329755 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.331500 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.331569 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.331581 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.331602 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.331616 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:04Z","lastTransitionTime":"2026-01-28T16:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.434684 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.434737 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.434748 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.434770 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.434786 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:04Z","lastTransitionTime":"2026-01-28T16:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.538165 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.538209 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.538219 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.538237 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.538255 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:04Z","lastTransitionTime":"2026-01-28T16:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.641867 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.641929 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.641944 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.641970 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.641984 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:04Z","lastTransitionTime":"2026-01-28T16:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.744559 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.744663 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.744690 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.744722 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.744743 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:04Z","lastTransitionTime":"2026-01-28T16:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.848568 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.848634 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.848651 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.848677 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.848695 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:04Z","lastTransitionTime":"2026-01-28T16:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.952376 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.952426 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.952435 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.952454 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:04 crc kubenswrapper[4877]: I0128 16:36:04.952466 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:04Z","lastTransitionTime":"2026-01-28T16:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.056270 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.056326 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.056336 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.056356 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.056367 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:05Z","lastTransitionTime":"2026-01-28T16:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.159742 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.159787 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.159799 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.159819 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.159833 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:05Z","lastTransitionTime":"2026-01-28T16:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.262362 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.262432 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.262444 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.262460 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.262488 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:05Z","lastTransitionTime":"2026-01-28T16:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.311169 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 11:10:47.277953383 +0000 UTC Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.329717 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.329747 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.329775 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:05 crc kubenswrapper[4877]: E0128 16:36:05.329950 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:05 crc kubenswrapper[4877]: E0128 16:36:05.330050 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:05 crc kubenswrapper[4877]: E0128 16:36:05.330119 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.365348 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.365457 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.365523 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.365557 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.365578 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:05Z","lastTransitionTime":"2026-01-28T16:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.467791 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.467837 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.467847 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.467863 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.467873 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:05Z","lastTransitionTime":"2026-01-28T16:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.570732 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.570818 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.570836 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.570859 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.570878 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:05Z","lastTransitionTime":"2026-01-28T16:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.674104 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.674150 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.674166 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.674185 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.674198 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:05Z","lastTransitionTime":"2026-01-28T16:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.776935 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.776986 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.777000 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.777018 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.777031 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:05Z","lastTransitionTime":"2026-01-28T16:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.880008 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.880060 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.880079 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.880100 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.880116 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:05Z","lastTransitionTime":"2026-01-28T16:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.983293 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.983336 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.983347 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.983364 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:05 crc kubenswrapper[4877]: I0128 16:36:05.983374 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:05Z","lastTransitionTime":"2026-01-28T16:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.087523 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.087603 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.087629 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.087666 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.087692 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:06Z","lastTransitionTime":"2026-01-28T16:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.191061 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.191133 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.191157 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.191188 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.191213 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:06Z","lastTransitionTime":"2026-01-28T16:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.294157 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.294301 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.294336 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.294380 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.294408 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:06Z","lastTransitionTime":"2026-01-28T16:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
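The NodeNotReady/NetworkPluginNotReady loop above repeats every ~100ms until a CNI config appears. As a minimal Go sketch of the readiness test behind "no CNI configuration file in /etc/kubernetes/cni/net.d/" (an illustration assuming the runtime simply globs the conf directory for the standard CNI extensions; not the actual kubelet or CRI-O source):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfigPresent reports whether any CNI network config exists in confDir.
// The "NetworkReady=false ... no CNI configuration file" condition in the
// log corresponds to a check like this coming up empty.
func cniConfigPresent(confDir string) (bool, error) {
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(confDir, pattern))
		if err != nil {
			return false, err
		}
		if len(matches) > 0 {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := cniConfigPresent("/etc/kubernetes/cni/net.d/")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if !ok {
		fmt.Println("NetworkReady=false: no CNI configuration file found")
	}
}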
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.294408 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:06Z","lastTransitionTime":"2026-01-28T16:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.311899 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 03:21:35.715005131 +0000 UTC
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.330293 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 16:36:06 crc kubenswrapper[4877]: E0128 16:36:06.330461 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.397434 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.397501 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.397513 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.397539 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.397551 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:06Z","lastTransitionTime":"2026-01-28T16:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.500948 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.501000 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.501008 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.501024 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
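Each certificate_manager line above reports a different rotation deadline for the same kubelet-serving certificate (expiring 2026-02-24 05:53:03 UTC). That is consistent with client-go's jittered scheduling, which, to my understanding, picks the deadline at a random point in roughly the final 10-30% of the certificate's lifetime and recomputes it on each pass. A hedged Go sketch of that jitter; the 70-90% window and the 90-day lifetime here are assumptions for illustration, not values taken from this cluster:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline sketches the jittered deadline: a random point between
// 70% and 90% of the certificate's validity period. The exact constants in
// client-go's certificate manager may differ by version.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	notAfter, _ := time.Parse("2006-01-02 15:04:05", "2026-02-24 05:53:03")
	notBefore := notAfter.Add(-90 * 24 * time.Hour) // assumed 90-day lifetime
	// Recomputed on every attempt, so each log line shows a new deadline.
	for i := 0; i < 3; i++ {
		fmt.Println("rotation deadline is", rotationDeadline(notBefore, notAfter).UTC())
	}
}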
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.501035 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:06Z","lastTransitionTime":"2026-01-28T16:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.604116 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.604160 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.604170 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.604191 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.604203 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:06Z","lastTransitionTime":"2026-01-28T16:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.707215 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.707271 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.707284 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.707305 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.707317 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:06Z","lastTransitionTime":"2026-01-28T16:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.809820 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.809869 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.809882 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.809905 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.809918 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:06Z","lastTransitionTime":"2026-01-28T16:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.913651 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.913803 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.913816 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.913836 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:06 crc kubenswrapper[4877]: I0128 16:36:06.913847 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:06Z","lastTransitionTime":"2026-01-28T16:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.016615 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.016683 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.016746 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.016781 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.016804 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:07Z","lastTransitionTime":"2026-01-28T16:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.120206 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.120290 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.120302 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.120322 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.120333 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:07Z","lastTransitionTime":"2026-01-28T16:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.222917 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.222972 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.222993 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.223026 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.223056 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:07Z","lastTransitionTime":"2026-01-28T16:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.312462 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 23:53:41.141414653 +0000 UTC
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.327231 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.327319 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.327337 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.327365 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.327384 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:07Z","lastTransitionTime":"2026-01-28T16:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.329649 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.329701 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
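The status_manager failures that follow all share one root cause: the network-node-identity webhook at https://127.0.0.1:9743 serves a certificate whose NotAfter (2025-08-24T17:21:41Z) is before the node's current time (2026-01-28). A minimal Go sketch reproducing the x509 validity check that yields "certificate has expired or is not yet valid"; the certificate path is a placeholder, and this stands in for the verification Go's TLS stack performs during the handshake rather than the kubelet's own code:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	raw, err := os.ReadFile("/path/to/serving-cert.pem") // placeholder path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(raw)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	now := time.Now()
	switch {
	case now.After(cert.NotAfter):
		// Matches the log: "current time ... is after <NotAfter>"
		fmt.Printf("certificate has expired: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	case now.Before(cert.NotBefore):
		fmt.Printf("certificate not yet valid: current time %s is before %s\n",
			now.UTC().Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
	default:
		fmt.Printf("certificate valid until %s\n", cert.NotAfter.UTC().Format(time.RFC3339))
	}
}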
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.329763 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:36:07 crc kubenswrapper[4877]: E0128 16:36:07.329884 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d"
Jan 28 16:36:07 crc kubenswrapper[4877]: E0128 16:36:07.330052 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 16:36:07 crc kubenswrapper[4877]: E0128 16:36:07.330177 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.344707 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.361957 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.374783 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.387144 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.415399 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.427014 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.431331 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.431409 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.431433 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.431459 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.431507 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:07Z","lastTransitionTime":"2026-01-28T16:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.443649 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.461733 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"911ece97-f301-4880-b308-d23e6f42071b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d24a6a68dfc0269608134b70620f1fc2ed73ea509b8f0152948ec8961679bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://617e761209376efc1375de420ec95948c4d78d70a0b577357ad157c85d23cab0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eebc783d401483d227acf6a4fa2854c410d9e195bc3cf1a968fb0eec540187d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.474637 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.493561 4877 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.510812 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.524867 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.535046 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.535149 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.535167 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.535197 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.535216 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:07Z","lastTransitionTime":"2026-01-28T16:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.543739 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:55Z\\\",\\\"message\\\":\\\"55.306529 6597 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI0128 16:35:55.306460 6597 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI0128 16:35:55.306541 6597 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nF0128 16:35:55.306541 6597 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z]\\\\nI0128 16:35:55.306553 6597 default_network_controller.go:776] Recording success event on p\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed 
container=ovnkube-controller pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.559650 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d
3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.576846 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.591035 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.606986 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:07Z is after 2025-08-24T17:21:41Z" Jan 28 
16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.615639    4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs\") pod \"network-metrics-daemon-bh9bk\" (UID: \"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\") " pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:36:07 crc kubenswrapper[4877]: E0128 16:36:07.615875    4877 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 16:36:07 crc kubenswrapper[4877]: E0128 16:36:07.615986    4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs podName:a6ea3417-5f04-4035-aaea-0dc5ad7d002d nodeName:}" failed. No retries permitted until 2026-01-28 16:36:39.615958484 +0000 UTC m=+103.174285372 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs") pod "network-metrics-daemon-bh9bk" (UID: "a6ea3417-5f04-4035-aaea-0dc5ad7d002d") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.638351    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.638444    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.638459    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.638495    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.638511    4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:07Z","lastTransitionTime":"2026-01-28T16:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.740877    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.740951    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.740977    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.741018    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.741044    4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:07Z","lastTransitionTime":"2026-01-28T16:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.843881    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.843942    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.843956    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.843974    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.843987    4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:07Z","lastTransitionTime":"2026-01-28T16:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.946544    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.946653    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.946668    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.946691    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:07 crc kubenswrapper[4877]: I0128 16:36:07.946706    4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:07Z","lastTransitionTime":"2026-01-28T16:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.052339    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.052406    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.052419    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.052442    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.052460    4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:08Z","lastTransitionTime":"2026-01-28T16:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.155282    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.155319    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.155329    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.155342    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.155352    4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:08Z","lastTransitionTime":"2026-01-28T16:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.257854    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.257894    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.257907    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.257922    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.257932    4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:08Z","lastTransitionTime":"2026-01-28T16:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.313099    4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 17:46:32.906498932 +0000 UTC
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.329938    4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 16:36:08 crc kubenswrapper[4877]: E0128 16:36:08.330086    4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.331690    4877 scope.go:117] "RemoveContainer" containerID="46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288"
Jan 28 16:36:08 crc kubenswrapper[4877]: E0128 16:36:08.331916    4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.361570    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.361651    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.361674    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.361703    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.361722    4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:08Z","lastTransitionTime":"2026-01-28T16:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.464674    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.464745    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.464765    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.464793    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.464814    4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:08Z","lastTransitionTime":"2026-01-28T16:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.568288    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.568359    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.568372    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.568388    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.568396    4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:08Z","lastTransitionTime":"2026-01-28T16:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.671278    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.671333    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.671344    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.671360    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.671374    4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:08Z","lastTransitionTime":"2026-01-28T16:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.774634    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.774697    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.774712    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.774733    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.774749    4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:08Z","lastTransitionTime":"2026-01-28T16:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.878604    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.878717    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.878737    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.878764    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.878782    4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:08Z","lastTransitionTime":"2026-01-28T16:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.982036    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.982079    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.982093    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.982115    4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:08 crc kubenswrapper[4877]: I0128 16:36:08.982130    4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:08Z","lastTransitionTime":"2026-01-28T16:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.085064 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.085112 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.085124 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.085143 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.085155 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:09Z","lastTransitionTime":"2026-01-28T16:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.187859 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.187917 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.187928 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.187949 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.187965 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:09Z","lastTransitionTime":"2026-01-28T16:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.292980 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.293053 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.293080 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.293114 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.293137 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:09Z","lastTransitionTime":"2026-01-28T16:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.313505 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 02:54:26.424633575 +0000 UTC
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.330099 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.330172 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:36:09 crc kubenswrapper[4877]: E0128 16:36:09.330231 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.330257 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:36:09 crc kubenswrapper[4877]: E0128 16:36:09.330388 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 16:36:09 crc kubenswrapper[4877]: E0128 16:36:09.330613 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.396221 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.396272 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.396286 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.396307 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.396324 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:09Z","lastTransitionTime":"2026-01-28T16:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.499615 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.499678 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.499689 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.499709 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.499722 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:09Z","lastTransitionTime":"2026-01-28T16:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.602967 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.603039 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.603055 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.603120 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.603137 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:09Z","lastTransitionTime":"2026-01-28T16:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.706970 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.707032 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.707046 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.707071 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.707085 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:09Z","lastTransitionTime":"2026-01-28T16:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.809661 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.809698 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.809710 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.809729 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.809741 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:09Z","lastTransitionTime":"2026-01-28T16:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.837779 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hbxsq_2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a/kube-multus/0.log"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.837827 4877 generic.go:334] "Generic (PLEG): container finished" podID="2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a" containerID="1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e" exitCode=1
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.837861 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hbxsq" event={"ID":"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a","Type":"ContainerDied","Data":"1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e"}
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.838309 4877 scope.go:117] "RemoveContainer" containerID="1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.862534 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226
b3f28b19c4a6655504366288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:55Z\\\",\\\"message\\\":\\\"55.306529 6597 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI0128 16:35:55.306460 6597 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI0128 16:35:55.306541 6597 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nF0128 16:35:55.306541 6597 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z]\\\\nI0128 16:35:55.306553 6597 default_network_controller.go:776] Recording success event on p\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:09Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.876903 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:09Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.894288 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:09Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.907611 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:09Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.912039 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.912086 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.912094 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.912110 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.912139 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:09Z","lastTransitionTime":"2026-01-28T16:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.923976 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:09Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.943524 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:09Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.956172 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:09Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.972832 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:09Z is after 2025-08-24T17:21:41Z" Jan 28 
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.984251 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:09Z is after 2025-08-24T17:21:41Z"
Jan 28 16:36:09 crc kubenswrapper[4877]: I0128 16:36:09.997042 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:09Z is after 2025-08-24T17:21:41Z"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:10Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.014657 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.014699 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.014723 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.014739 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.014750 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:10Z","lastTransitionTime":"2026-01-28T16:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.017019 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:10Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.033115 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:36:08Z\\\",\\\"message\\\":\\\"2026-01-28T16:35:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6b3db6a1-f9a2-4dbe-823e-95fad494a0fc\\\\n2026-01-28T16:35:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6b3db6a1-f9a2-4dbe-823e-95fad494a0fc to /host/opt/cni/bin/\\\\n2026-01-28T16:35:23Z [verbose] multus-daemon started\\\\n2026-01-28T16:35:23Z [verbose] Readiness Indicator file check\\\\n2026-01-28T16:36:08Z [error] have you checked that your 
default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:10Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.043139 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:10Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.063590 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:10Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.076678 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"911ece97-f301-4880-b308-d23e6f42071b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d24a6a68dfc0269608134b70620f1fc2ed73ea509b8f0152948ec8961679bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://617e761209376efc1375de420ec95948c4d78d70a0b577357ad157c85d23cab0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eebc783d401483d227acf6a4fa2854c410d9e195bc3cf1a968fb0eec540187d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:10Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.093203 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:10Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.117411 4877 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.117457 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.117469 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.117506 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.117519 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:10Z","lastTransitionTime":"2026-01-28T16:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.221263 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.221314 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.221331 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.221357 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.221375 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:10Z","lastTransitionTime":"2026-01-28T16:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.314559 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 03:23:35.777601455 +0000 UTC Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.324101 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.324136 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.324147 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.324168 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.324180 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:10Z","lastTransitionTime":"2026-01-28T16:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.329650 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:10 crc kubenswrapper[4877]: E0128 16:36:10.329869 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.427345 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.427405 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.427423 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.427450 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.427514 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:10Z","lastTransitionTime":"2026-01-28T16:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.530653 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.531023 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.531127 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.531361 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.531456 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:10Z","lastTransitionTime":"2026-01-28T16:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.634615 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.635060 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.635225 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.635365 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.635468 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:10Z","lastTransitionTime":"2026-01-28T16:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.738342 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.738420 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.738436 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.738458 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.738494 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:10Z","lastTransitionTime":"2026-01-28T16:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.840254 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.840597 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.840720 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.840815 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.840902 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:10Z","lastTransitionTime":"2026-01-28T16:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.844781 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hbxsq_2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a/kube-multus/0.log" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.844861 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hbxsq" event={"ID":"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a","Type":"ContainerStarted","Data":"36b4d387fb9ff0faec293faf540dca479867d1b5f5df34cdfad39d1e62348033"} Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.865259 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:10Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.883635 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:10Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.901939 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:10Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.922268 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:10Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.936993 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:10Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.953111 4877 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.953154 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.953166 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.953187 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.953198 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:10Z","lastTransitionTime":"2026-01-28T16:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.957646 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226
b3f28b19c4a6655504366288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:55Z\\\",\\\"message\\\":\\\"55.306529 6597 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI0128 16:35:55.306460 6597 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI0128 16:35:55.306541 6597 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nF0128 16:35:55.306541 6597 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z]\\\\nI0128 16:35:55.306553 6597 default_network_controller.go:776] Recording success event on p\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:10Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.972986 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:10Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:10 crc kubenswrapper[4877]: I0128 16:36:10.988330 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:10Z is after 2025-08-24T17:21:41Z" Jan 28 
16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.001468 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:10Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.015110 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:11Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.031029 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:11Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.046984 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36b4d387fb9ff0faec293faf540dca479867d1b5f5df34cdfad39d1e62348033\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:36:08Z\\\",\\\"message\\\":\\\"2026-01-28T16:35:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6b3db6a1-f9a2-4dbe-823e-95fad494a0fc\\\\n2026-01-28T16:35:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6b3db6a1-f9a2-4dbe-823e-95fad494a0fc to /host/opt/cni/bin/\\\\n2026-01-28T16:35:23Z [verbose] multus-daemon started\\\\n2026-01-28T16:35:23Z [verbose] Readiness Indicator file check\\\\n2026-01-28T16:36:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:36:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:11Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.055443 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.055528 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.055543 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.055566 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.055579 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:11Z","lastTransitionTime":"2026-01-28T16:36:11Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.062494 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:11Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.077663 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:11Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.088686 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:11Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.100522 4877 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5
cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:11Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.110795 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"911ece97-f301-4880-b308-d23e6f42071b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d24a6a68dfc0269608134b70620f1fc2ed73ea509b8f0152948ec8961679bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://617e761209376efc1375de420ec95948c4d78d70a0b577357ad157c85d23cab0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c
97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eebc783d401483d227acf6a4fa2854c410d9e195bc3cf1a968fb0eec540187d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:11Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.158457 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.158515 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.158526 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.158542 4877 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.158552 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:11Z","lastTransitionTime":"2026-01-28T16:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.261041 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.261113 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.261133 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.261186 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.261204 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:11Z","lastTransitionTime":"2026-01-28T16:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.314755 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 02:21:55.90712414 +0000 UTC Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.330139 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.330177 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.330223 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:11 crc kubenswrapper[4877]: E0128 16:36:11.330339 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:11 crc kubenswrapper[4877]: E0128 16:36:11.330520 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:11 crc kubenswrapper[4877]: E0128 16:36:11.330643 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.364298 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.364692 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.364762 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.364839 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.364897 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:11Z","lastTransitionTime":"2026-01-28T16:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.468447 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.468566 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.468592 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.468624 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.468648 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:11Z","lastTransitionTime":"2026-01-28T16:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.570940 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.571015 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.571025 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.571049 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.571060 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:11Z","lastTransitionTime":"2026-01-28T16:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.672838 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.672890 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.672901 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.672919 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.672932 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:11Z","lastTransitionTime":"2026-01-28T16:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.775225 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.775269 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.775280 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.775296 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.775340 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:11Z","lastTransitionTime":"2026-01-28T16:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.878372 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.878426 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.878438 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.878456 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.878468 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:11Z","lastTransitionTime":"2026-01-28T16:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.981740 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.982052 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.982188 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.982264 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:11 crc kubenswrapper[4877]: I0128 16:36:11.982334 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:11Z","lastTransitionTime":"2026-01-28T16:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.085143 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.085197 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.085213 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.085235 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.085248 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:12Z","lastTransitionTime":"2026-01-28T16:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.188775 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.188841 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.188861 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.188886 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.188907 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:12Z","lastTransitionTime":"2026-01-28T16:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.292167 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.292276 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.292299 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.292333 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.292356 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:12Z","lastTransitionTime":"2026-01-28T16:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.314922 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 06:09:24.979982243 +0000 UTC Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.330287 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:12 crc kubenswrapper[4877]: E0128 16:36:12.330444 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.395678 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.396060 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.396130 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.396197 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.396261 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:12Z","lastTransitionTime":"2026-01-28T16:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.499206 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.499258 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.499275 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.499296 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.499312 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:12Z","lastTransitionTime":"2026-01-28T16:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.601810 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.601857 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.601868 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.601887 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.601898 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:12Z","lastTransitionTime":"2026-01-28T16:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.705057 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.705105 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.705116 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.705131 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.705142 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:12Z","lastTransitionTime":"2026-01-28T16:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.809680 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.809749 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.809767 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.809789 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.809803 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:12Z","lastTransitionTime":"2026-01-28T16:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.912625 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.912705 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.912729 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.912761 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:12 crc kubenswrapper[4877]: I0128 16:36:12.912786 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:12Z","lastTransitionTime":"2026-01-28T16:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.015908 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.015958 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.015970 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.015985 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.015995 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:13Z","lastTransitionTime":"2026-01-28T16:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.119796 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.119867 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.119884 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.119913 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.119936 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:13Z","lastTransitionTime":"2026-01-28T16:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.222775 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.222830 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.222840 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.222856 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.222870 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:13Z","lastTransitionTime":"2026-01-28T16:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.316657 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 02:23:13.545013685 +0000 UTC Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.326613 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.326808 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.326999 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.327108 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.327200 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:13Z","lastTransitionTime":"2026-01-28T16:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.330594 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.330613 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:13 crc kubenswrapper[4877]: E0128 16:36:13.330957 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.330636 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:13 crc kubenswrapper[4877]: E0128 16:36:13.331213 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:13 crc kubenswrapper[4877]: E0128 16:36:13.331335 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.431090 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.431137 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.431146 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.431162 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.431173 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:13Z","lastTransitionTime":"2026-01-28T16:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.534149 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.534256 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.534273 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.534301 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.534319 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:13Z","lastTransitionTime":"2026-01-28T16:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.638162 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.638246 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.638263 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.638284 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.638299 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:13Z","lastTransitionTime":"2026-01-28T16:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.742288 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.742353 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.742374 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.742401 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.742418 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:13Z","lastTransitionTime":"2026-01-28T16:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.846037 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.846091 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.846105 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.846126 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.846141 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:13Z","lastTransitionTime":"2026-01-28T16:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.948581 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.948645 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.948657 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.948674 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:13 crc kubenswrapper[4877]: I0128 16:36:13.948686 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:13Z","lastTransitionTime":"2026-01-28T16:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.050836 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.050886 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.050899 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.050946 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.050960 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:14Z","lastTransitionTime":"2026-01-28T16:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.154581 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.154688 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.154715 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.154747 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.154772 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:14Z","lastTransitionTime":"2026-01-28T16:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.180689 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.180745 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.180758 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.180777 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.180790 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:14Z","lastTransitionTime":"2026-01-28T16:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:14 crc kubenswrapper[4877]: E0128 16:36:14.200749 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:14Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.209781 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.209841 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.209853 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.209872 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.209884 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:14Z","lastTransitionTime":"2026-01-28T16:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:14 crc kubenswrapper[4877]: E0128 16:36:14.227149 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:14Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.231461 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.231522 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.231534 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.231548 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.231557 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:14Z","lastTransitionTime":"2026-01-28T16:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:14 crc kubenswrapper[4877]: E0128 16:36:14.248368 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:14Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.252448 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.252571 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.252598 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.252625 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.252644 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:14Z","lastTransitionTime":"2026-01-28T16:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:14 crc kubenswrapper[4877]: E0128 16:36:14.266218 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:14Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.270398 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.270423 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.270432 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.270446 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.270456 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:14Z","lastTransitionTime":"2026-01-28T16:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:14 crc kubenswrapper[4877]: E0128 16:36:14.286221 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:14Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:14 crc kubenswrapper[4877]: E0128 16:36:14.286351 4877 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.287980 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.288017 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.288026 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.288041 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.288054 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:14Z","lastTransitionTime":"2026-01-28T16:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.317727 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 10:53:40.132479155 +0000 UTC Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.330283 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:14 crc kubenswrapper[4877]: E0128 16:36:14.330411 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.390809 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.390884 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.390906 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.390937 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.390961 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:14Z","lastTransitionTime":"2026-01-28T16:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.494465 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.494594 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.494615 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.494641 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.494661 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:14Z","lastTransitionTime":"2026-01-28T16:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.598404 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.598516 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.598537 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.598565 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.598584 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:14Z","lastTransitionTime":"2026-01-28T16:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.701699 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.701740 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.701750 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.701765 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.701775 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:14Z","lastTransitionTime":"2026-01-28T16:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.805878 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.805947 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.805968 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.805996 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.806015 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:14Z","lastTransitionTime":"2026-01-28T16:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.909841 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.909906 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.909924 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.909952 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:14 crc kubenswrapper[4877]: I0128 16:36:14.909971 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:14Z","lastTransitionTime":"2026-01-28T16:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.012162 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.012265 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.012281 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.012302 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.012317 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:15Z","lastTransitionTime":"2026-01-28T16:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.114627 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.114764 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.114797 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.114826 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.114847 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:15Z","lastTransitionTime":"2026-01-28T16:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.218167 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.218257 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.218289 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.218327 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.218385 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:15Z","lastTransitionTime":"2026-01-28T16:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.318633 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 08:56:19.040723488 +0000 UTC Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.320995 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.321059 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.321079 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.321103 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.321123 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:15Z","lastTransitionTime":"2026-01-28T16:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.330564 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.330641 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:15 crc kubenswrapper[4877]: E0128 16:36:15.330720 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.330765 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:15 crc kubenswrapper[4877]: E0128 16:36:15.330901 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:15 crc kubenswrapper[4877]: E0128 16:36:15.331012 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.423889 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.423952 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.423969 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.424000 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.424018 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:15Z","lastTransitionTime":"2026-01-28T16:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.527648 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.527710 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.527728 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.527754 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.527772 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:15Z","lastTransitionTime":"2026-01-28T16:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.630559 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.630614 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.630631 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.630655 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.630673 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:15Z","lastTransitionTime":"2026-01-28T16:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.733772 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.733874 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.733893 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.733922 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.733943 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:15Z","lastTransitionTime":"2026-01-28T16:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.837784 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.837869 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.837888 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.837913 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.837934 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:15Z","lastTransitionTime":"2026-01-28T16:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.940830 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.940898 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.940910 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.940935 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:15 crc kubenswrapper[4877]: I0128 16:36:15.940949 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:15Z","lastTransitionTime":"2026-01-28T16:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.044670 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.044746 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.044763 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.044797 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.044823 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:16Z","lastTransitionTime":"2026-01-28T16:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.148760 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.148833 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.148843 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.148863 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.148905 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:16Z","lastTransitionTime":"2026-01-28T16:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.252853 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.252904 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.252916 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.252932 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.252943 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:16Z","lastTransitionTime":"2026-01-28T16:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.318957 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 05:58:15.80122615 +0000 UTC Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.329466 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:16 crc kubenswrapper[4877]: E0128 16:36:16.329641 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.356306 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.356367 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.356386 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.356410 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.356428 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:16Z","lastTransitionTime":"2026-01-28T16:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.459985 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.460055 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.460073 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.460096 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.460114 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:16Z","lastTransitionTime":"2026-01-28T16:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.563335 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.563438 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.563470 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.563564 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.563596 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:16Z","lastTransitionTime":"2026-01-28T16:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.666887 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.666954 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.666966 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.666985 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.666997 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:16Z","lastTransitionTime":"2026-01-28T16:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.771261 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.771356 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.771376 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.771403 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.771424 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:16Z","lastTransitionTime":"2026-01-28T16:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.873678 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.873747 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.873763 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.873781 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.873796 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:16Z","lastTransitionTime":"2026-01-28T16:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.977110 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.977173 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.977197 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.977228 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:16 crc kubenswrapper[4877]: I0128 16:36:16.977252 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:16Z","lastTransitionTime":"2026-01-28T16:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.079967 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.080026 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.080044 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.080066 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.080082 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:17Z","lastTransitionTime":"2026-01-28T16:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.183864 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.183914 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.183932 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.183959 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.183977 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:17Z","lastTransitionTime":"2026-01-28T16:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.287400 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.287468 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.287524 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.287558 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.287577 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:17Z","lastTransitionTime":"2026-01-28T16:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.319772 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 18:46:12.837017422 +0000 UTC Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.330644 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.330666 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:17 crc kubenswrapper[4877]: E0128 16:36:17.330919 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.330964 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:17 crc kubenswrapper[4877]: E0128 16:36:17.331157 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:17 crc kubenswrapper[4877]: E0128 16:36:17.331384 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.349292 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.370768 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.387774 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36b4d387fb9ff0faec293faf540dca479867d1b5f5df34cdfad39d1e62348033\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:36:08Z\\\",\\\"message\\\":\\\"2026-01-28T16:35:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6b3db6a1-f9a2-4dbe-823e-95fad494a0fc\\\\n2026-01-28T16:35:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6b3db6a1-f9a2-4dbe-823e-95fad494a0fc to /host/opt/cni/bin/\\\\n2026-01-28T16:35:23Z [verbose] multus-daemon started\\\\n2026-01-28T16:35:23Z [verbose] Readiness Indicator file check\\\\n2026-01-28T16:36:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:36:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.393796 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.393840 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.393857 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.393883 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.393902 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:17Z","lastTransitionTime":"2026-01-28T16:36:17Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.409835 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\
\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.427795 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"911ece97-f301-4880-b308-d23e6f42071b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d24a6a68dfc0269608134b70620f1fc2ed73ea509b8f0152948ec8961679bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://617e761209376efc1375de420ec95948c4d78d70a0b577357ad157c85d23cab0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eebc783d401483d227acf6a4fa2854c410d9e195bc3cf1a968fb0eec540187d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.445255 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.459299 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.481343 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.496951 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.497012 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.497031 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.497055 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.497078 4877 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:17Z","lastTransitionTime":"2026-01-28T16:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.502130 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.519373 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.539826 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:55Z\\\",\\\"message\\\":\\\"55.306529 6597 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI0128 16:35:55.306460 6597 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI0128 16:35:55.306541 6597 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nF0128 16:35:55.306541 6597 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z]\\\\nI0128 16:35:55.306553 6597 default_network_controller.go:776] Recording success event on p\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.561041 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.580665 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.598162 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 
16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.601053 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.601095 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.601137 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.601164 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.601180 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:17Z","lastTransitionTime":"2026-01-28T16:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.614368 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.633793 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.648458 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:17Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.703685 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.703723 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.703736 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.703756 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.703769 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:17Z","lastTransitionTime":"2026-01-28T16:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.805814 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.805851 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.805863 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.805882 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.805894 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:17Z","lastTransitionTime":"2026-01-28T16:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.908135 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.908193 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.908205 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.908228 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:17 crc kubenswrapper[4877]: I0128 16:36:17.908275 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:17Z","lastTransitionTime":"2026-01-28T16:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.011186 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.011218 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.011226 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.011238 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.011248 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:18Z","lastTransitionTime":"2026-01-28T16:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.114467 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.114555 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.114573 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.114608 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.114644 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:18Z","lastTransitionTime":"2026-01-28T16:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.225033 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.225089 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.225107 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.225133 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.225154 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:18Z","lastTransitionTime":"2026-01-28T16:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.320231 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 10:42:13.4262258 +0000 UTC Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.328280 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.328325 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.328344 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.328368 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.328385 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:18Z","lastTransitionTime":"2026-01-28T16:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.329888 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:18 crc kubenswrapper[4877]: E0128 16:36:18.330084 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.349180 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.432176 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.432273 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.432348 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.432376 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.432394 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:18Z","lastTransitionTime":"2026-01-28T16:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.452602 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.535712 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.535773 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.535791 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.535815 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.535833 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:18Z","lastTransitionTime":"2026-01-28T16:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.639017 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.639077 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.639095 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.639120 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.639138 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:18Z","lastTransitionTime":"2026-01-28T16:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.742683 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.742721 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.742734 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.742750 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.742765 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:18Z","lastTransitionTime":"2026-01-28T16:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.845124 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.845189 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.845230 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.845258 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.845277 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:18Z","lastTransitionTime":"2026-01-28T16:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.948249 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.948336 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.948364 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.948397 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:18 crc kubenswrapper[4877]: I0128 16:36:18.948425 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:18Z","lastTransitionTime":"2026-01-28T16:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.052565 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.052604 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.052619 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.052640 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.052653 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:19Z","lastTransitionTime":"2026-01-28T16:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.155876 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.155933 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.155945 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.155964 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.155974 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:19Z","lastTransitionTime":"2026-01-28T16:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.259311 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.259377 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.259389 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.259409 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.259428 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:19Z","lastTransitionTime":"2026-01-28T16:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.321284 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 03:00:38.087137451 +0000 UTC Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.329890 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.329951 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.330192 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:19 crc kubenswrapper[4877]: E0128 16:36:19.330410 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.330642 4877 scope.go:117] "RemoveContainer" containerID="46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288" Jan 28 16:36:19 crc kubenswrapper[4877]: E0128 16:36:19.330699 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:19 crc kubenswrapper[4877]: E0128 16:36:19.330957 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.363447 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.363536 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.363556 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.363581 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.363600 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:19Z","lastTransitionTime":"2026-01-28T16:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.466919 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.466982 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.467051 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.467091 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.467111 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:19Z","lastTransitionTime":"2026-01-28T16:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.570731 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.570799 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.570816 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.570843 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.570862 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:19Z","lastTransitionTime":"2026-01-28T16:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.674520 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.674607 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.674638 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.674677 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.674701 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:19Z","lastTransitionTime":"2026-01-28T16:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.777844 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.777941 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.777971 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.778010 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.778039 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:19Z","lastTransitionTime":"2026-01-28T16:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.878911 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovnkube-controller/2.log" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.880376 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.880429 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.880449 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.880509 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.880535 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:19Z","lastTransitionTime":"2026-01-28T16:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.882746 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerStarted","Data":"ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f"} Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.883389 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.908115 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:19Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.944785 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"911ece97-f301-4880-b308-d23e6f42071b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d24a6a68dfc0269608134b70620f1fc2ed73ea509b8f0152948ec8961679bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://617e761209376efc1375de420ec95948c4d78d70a0b577357ad157c85d23cab0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eebc783d401483d227acf6a4fa2854c410d9e195bc3cf1a968fb0eec540187d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:19Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.983078 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.983123 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.983132 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.983150 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 
16:36:19.983160 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:19Z","lastTransitionTime":"2026-01-28T16:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:19 crc kubenswrapper[4877]: I0128 16:36:19.993978 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:19Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.012403 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.026525 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.036268 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.055191 4877 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-d
ev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastSta
te\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:55Z\\\",\\\"message\\\":\\\"55.306529 6597 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI0128 16:35:55.306460 6597 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI0128 16:35:55.306541 6597 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nF0128 16:35:55.306541 6597 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z]\\\\nI0128 16:35:55.306553 6597 default_network_controller.go:776] Recording success event on 
p\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:36:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.078883 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4e02072-0a0b-46f8-bfd2-e280f3176882\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://03c82c3e6c52f08c4d2bf0308e9d1088a27023c2ec8ce0a0c7c163977c0b6b8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d01c6e8f334b7eb9b465bca94f19011489c9a5c9b1180110b3d88ab6e5a4c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456daa5d52777cc4d3373b3d6a540879c49cab8179b3d976f22a38ab529fe7ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://360cd59a83a45618daa042cda86ec261bbd3065
748ae9f6a6207a5eff1b896e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f9373257785e093cc79ea2af752b21c428cb5c15d85f41ad397a6f113404770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17a0bf5e39e09c802a625664b38f63783e7ab4dc6f3a4ab98a17a5c55001bf74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a0bf5e39e09c802a625664b38f63783e7ab4dc6f3a4ab98a17a5c55001bf74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a0399f6e62c385793232da3b37a8f787f2e1dbb26fbd385b55c30e20a8c07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://37a0399f6e62c385793232da3b37a8f787f2e1dbb26fbd385b55c30e20a8c07f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://502420ec6fa5e2564a8a08c4f8d9ed6ffc0cd0dbf1aac60a4cc075219b9e5a28\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://502420ec6fa5e2564a8a08c4f8d9ed6ffc0cd0dbf1aac60a4cc075219b9e5a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.085193 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.085238 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.085253 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.085283 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.085299 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:20Z","lastTransitionTime":"2026-01-28T16:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.093665 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.112672 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.127125 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.143261 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 
16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.155309 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.177028 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.188025 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.188062 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:20 crc 
kubenswrapper[4877]: I0128 16:36:20.188074 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.188092 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.188105 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:20Z","lastTransitionTime":"2026-01-28T16:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.193120 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.210157 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42eb9c17-72db-47fe-bc43-6ab0b3f10a33\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://363e810ae2dd4b41a9ba40fde0270f216d292d9b1d4f31191304a846ac176245\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc02683c4b4816b09a9eb418fda2e33726ff56af4fb7dc2c830559feaf3a6d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc02683c4b4816b09a9eb418fda2e33726ff56af4fb7dc2c830559feaf3a6d84\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.229692 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.247761 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36b4d387fb9ff0faec293faf540dca479867d1b5f5df34cdfad39d1e62348033\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:36:08Z\\\",\\\"message\\\":\\\"2026-01-28T16:35:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6b3db6a1-f9a2-4dbe-823e-95fad494a0fc\\\\n2026-01-28T16:35:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6b3db6a1-f9a2-4dbe-823e-95fad494a0fc to /host/opt/cni/bin/\\\\n2026-01-28T16:35:23Z [verbose] multus-daemon started\\\\n2026-01-28T16:35:23Z [verbose] Readiness Indicator file check\\\\n2026-01-28T16:36:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:36:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.263061 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.290708 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.290760 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.290772 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.290793 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.290805 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:20Z","lastTransitionTime":"2026-01-28T16:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.322226 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 12:01:12.475023326 +0000 UTC Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.330152 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:20 crc kubenswrapper[4877]: E0128 16:36:20.330297 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.394230 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.394288 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.394305 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.394348 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.394367 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:20Z","lastTransitionTime":"2026-01-28T16:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.498262 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.498318 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.498334 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.498355 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.498371 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:20Z","lastTransitionTime":"2026-01-28T16:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.602347 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.602424 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.602441 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.602469 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.602516 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:20Z","lastTransitionTime":"2026-01-28T16:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.705849 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.705910 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.705927 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.705954 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.705972 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:20Z","lastTransitionTime":"2026-01-28T16:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.809932 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.809976 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.809992 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.810013 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.810026 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:20Z","lastTransitionTime":"2026-01-28T16:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.889254 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovnkube-controller/3.log" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.890243 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovnkube-controller/2.log" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.894085 4877 generic.go:334] "Generic (PLEG): container finished" podID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerID="ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f" exitCode=1 Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.894133 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerDied","Data":"ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f"} Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.894189 4877 scope.go:117] "RemoveContainer" containerID="46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.895144 4877 scope.go:117] "RemoveContainer" containerID="ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f" Jan 28 16:36:20 crc kubenswrapper[4877]: E0128 16:36:20.895331 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.916836 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.916929 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.916954 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.916989 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.917013 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:20Z","lastTransitionTime":"2026-01-28T16:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.922532 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.945292 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.966877 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.983594 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36b4d387fb9ff0faec293faf540dca479867d1b5f5df34cdfad39d1e62348033\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:36:08Z\\\",\\\"message\\\":\\\"2026-01-28T16:35:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6b3db6a1-f9a2-4dbe-823e-95fad494a0fc\\\\n2026-01-28T16:35:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6b3db6a1-f9a2-4dbe-823e-95fad494a0fc to /host/opt/cni/bin/\\\\n2026-01-28T16:35:23Z [verbose] multus-daemon started\\\\n2026-01-28T16:35:23Z [verbose] Readiness Indicator file check\\\\n2026-01-28T16:36:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:36:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:20 crc kubenswrapper[4877]: I0128 16:36:20.996904 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:20Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.008344 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42eb9c17-72db-47fe-bc43-6ab0b3f10a33\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://363e810ae2dd4b41a9ba40fde0270f216d292d9b1d4f31191304a846ac176245\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc02683c4b4816b09a9eb418fda2e33726ff56af4fb7dc2c830559feaf3a6d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc02683c4b4816b09a9eb418fda2e33726ff56af4fb7dc2c830559feaf3a6d84\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.020160 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.020217 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.020229 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.020249 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.020260 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:21Z","lastTransitionTime":"2026-01-28T16:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.025783 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.037412 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.049768 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:3
4:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.062867 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"911ece97-f301-4880-b308-d23e6f42071b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d24a6a68dfc0269608134b70620f1fc2ed73ea509b8f0152948ec8961679bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://617e761209376efc1375de420ec95948c4d78d70a0b577357ad157c85d23cab0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eebc783d401483d227acf6a4fa2854c410d9e195bc3cf1a968fb0eec540187d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.081175 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 
2025-08-24T17:21:41Z" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.084685 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.084906 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:25.084855563 +0000 UTC m=+148.643182491 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.085202 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.085345 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.085573 4877 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.085603 4877 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.085785 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 16:37:25.085741816 +0000 UTC m=+148.644068884 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.085830 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 16:37:25.085808198 +0000 UTC m=+148.644135166 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.099229 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.110723 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.123534 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.123591 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.123601 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.123617 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.123626 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:21Z","lastTransitionTime":"2026-01-28T16:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.123829 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.134948 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.155374 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46af6a448a3fdd416124f472eb77cbb3c087c226b3f28b19c4a6655504366288\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:35:55Z\\\",\\\"message\\\":\\\"55.306529 6597 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI0128 16:35:55.306460 6597 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI0128 16:35:55.306541 6597 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nF0128 16:35:55.306541 6597 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:35:55Z is after 2025-08-24T17:21:41Z]\\\\nI0128 16:35:55.306553 6597 default_network_controller.go:776] Recording success event on p\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:36:20Z\\\",\\\"message\\\":\\\"ssionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.5.239],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI0128 16:36:20.381942 6945 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0128 16:36:20.382348 6945 lb_config.go:1031] Cluster endpoints for openshift-operator-lifecycle-manager/packageserver-service for network=default are: map[]\\\\nI0128 16:36:20.382361 6945 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI0128 16:36:20.382371 6945 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI0128 16:36:20.382377 6945 
default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0128 16:36:20.382369 6945 services_controller.go:443] Built service openshift-operator-lifecycle-manager/packageserver-service LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.4.153\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:5443, clusterEndpoints:services.lbEndpo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:36:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuberne
tes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 2025-08-24T17:21:41Z"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.175356 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4e02072-0a0b-46f8-bfd2-e280f3176882\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://03c82c3e6c52f08c4d2bf0308e9d1088a27023c2ec8ce0a0c7c163977c0b6b8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d01c6e8f334b7eb9b465bca94f19011489c9a5c9b1180110b3d88ab6e5a4c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456daa5d52777cc4d3373b3d6a540879c49cab8179b3d976f22a38ab529fe7ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://360cd59a83a45618daa042cda86ec261bbd3065
748ae9f6a6207a5eff1b896e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f9373257785e093cc79ea2af752b21c428cb5c15d85f41ad397a6f113404770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17a0bf5e39e09c802a625664b38f63783e7ab4dc6f3a4ab98a17a5c55001bf74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a0bf5e39e09c802a625664b38f63783e7ab4dc6f3a4ab98a17a5c55001bf74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a0399f6e62c385793232da3b37a8f787f2e1dbb26fbd385b55c30e20a8c07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://37a0399f6e62c385793232da3b37a8f787f2e1dbb26fbd385b55c30e20a8c07f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://502420ec6fa5e2564a8a08c4f8d9ed6ffc0cd0dbf1aac60a4cc075219b9e5a28\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://502420ec6fa5e2564a8a08c4f8d9ed6ffc0cd0dbf1aac60a4cc075219b9e5a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 2025-08-24T17:21:41Z"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.186123 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.186262 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.186402 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.186436 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.186453 4877 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.186547 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 16:37:25.186522058 +0000 UTC m=+148.744848956 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.186957 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.187007 4877 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.187029 4877 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.187117 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 16:37:25.187091442 +0000 UTC m=+148.745418350 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.191609 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.203787 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 2025-08-24T17:21:41Z"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.226865 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.226902 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.226913 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.226932 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.226947 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:21Z","lastTransitionTime":"2026-01-28T16:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.323368 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 13:20:29.278554144 +0000 UTC
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.329506 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.329653 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.329736 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.329991 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.329859 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.329827 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.330073 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.330088 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.330096 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.330104 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.330124 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:21Z","lastTransitionTime":"2026-01-28T16:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.433912 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.433987 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.434006 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.434034 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.434052 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:21Z","lastTransitionTime":"2026-01-28T16:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.537713 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.537775 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.537792 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.537819 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.537837 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:21Z","lastTransitionTime":"2026-01-28T16:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.642066 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.642142 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.642163 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.642196 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.642218 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:21Z","lastTransitionTime":"2026-01-28T16:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.745555 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.745647 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.745676 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.745711 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.745735 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:21Z","lastTransitionTime":"2026-01-28T16:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.849992 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.850060 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.850079 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.850112 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.850138 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:21Z","lastTransitionTime":"2026-01-28T16:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.908057 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovnkube-controller/3.log"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.914916 4877 scope.go:117] "RemoveContainer" containerID="ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f"
Jan 28 16:36:21 crc kubenswrapper[4877]: E0128 16:36:21.915229 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf"
Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.931419 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.953852 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.953925 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.953947 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.953974 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.953995 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:21Z","lastTransitionTime":"2026-01-28T16:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.962708 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:36:20Z\\\",\\\"message\\\":\\\"ssionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.5.239],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI0128 16:36:20.381942 6945 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0128 16:36:20.382348 6945 lb_config.go:1031] Cluster endpoints for openshift-operator-lifecycle-manager/packageserver-service for network=default are: map[]\\\\nI0128 16:36:20.382361 6945 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI0128 16:36:20.382371 6945 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI0128 16:36:20.382377 6945 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0128 16:36:20.382369 6945 services_controller.go:443] Built service openshift-operator-lifecycle-manager/packageserver-service LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.4.153\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:5443, clusterEndpoints:services.lbEndpo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:36:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed 
container=ovnkube-controller pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:21 crc kubenswrapper[4877]: I0128 16:36:21.988982 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4e02072-0a0b-46f8-bfd2-e280f3176882\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://03c82c3e6c52f08c4d2bf0308e9d1088a27023c2ec8ce0a0c7c163977c0b6b8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri
-o://6d01c6e8f334b7eb9b465bca94f19011489c9a5c9b1180110b3d88ab6e5a4c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456daa5d52777cc4d3373b3d6a540879c49cab8179b3d976f22a38ab529fe7ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://360cd59a83a45618daa042cda86ec261bbd3065748ae9f6a6207a5eff1b896e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f9373257785e093cc79ea2af752b21c428cb5c15d85f41ad397a6f113404770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17a0bf5e39e09c802a625664b38f63783e7ab4dc6f3a4ab98a17a5c550
01bf74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a0bf5e39e09c802a625664b38f63783e7ab4dc6f3a4ab98a17a5c55001bf74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a0399f6e62c385793232da3b37a8f787f2e1dbb26fbd385b55c30e20a8c07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://37a0399f6e62c385793232da3b37a8f787f2e1dbb26fbd385b55c30e20a8c07f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://502420ec6fa5e2564a8a08c4f8d9ed6ffc0cd0dbf1aac60a4cc075219b9e5a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://502420ec6fa5e2564a8a08c4f8d9ed6ffc0cd0dbf1aac60a4cc075219b9e5a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:21Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.006024 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.023552 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.039865 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.056735 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.059684 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.059746 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.059765 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.059799 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.059847 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:22Z","lastTransitionTime":"2026-01-28T16:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.075204 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.092420 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:22Z is after 2025-08-24T17:21:41Z" Jan 28 
16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.112668 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.132519 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.148452 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.163865 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.163945 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.163972 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.164013 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.164042 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:22Z","lastTransitionTime":"2026-01-28T16:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.164373 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42eb9c17-72db-47fe-bc43-6ab0b3f10a33\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://363e810ae2dd4b41a9ba40fde0270f216d292d9b1d4f31191304a846ac176245\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc02683c4b4816b09a9eb418fda2e33726ff56af4fb7dc2c830559feaf3a6d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc02683c4b4816b09a9eb418fda2e33726ff56af4fb7dc2c830559feaf3a6d84\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-
lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.185761 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.208846 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36b4d387fb9ff0faec293faf540dca479867d1b5f5df34cdfad39d1e62348033\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:36:08Z\\\",\\\"message\\\":\\\"2026-01-28T16:35:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6b3db6a1-f9a2-4dbe-823e-95fad494a0fc\\\\n2026-01-28T16:35:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6b3db6a1-f9a2-4dbe-823e-95fad494a0fc to /host/opt/cni/bin/\\\\n2026-01-28T16:35:23Z [verbose] multus-daemon started\\\\n2026-01-28T16:35:23Z [verbose] Readiness Indicator file check\\\\n2026-01-28T16:36:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:36:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.226989 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.243577 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.265593 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"911ece97-f301-4880-b308-d23e6f42071b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d24a6a68dfc0269608134b70620f1fc2ed73ea509b8f0152948ec8961679bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://617e761209376efc1375de420ec95948c4d78d70a0b577357ad157c85d23cab0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eebc783d401483d227acf6a4fa2854c410d9e195bc3cf1a968fb0eec540187d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.267453 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.267517 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.267532 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.267557 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 
16:36:22.267572 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:22Z","lastTransitionTime":"2026-01-28T16:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.285424 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:22Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.323904 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 06:10:49.186634177 +0000 UTC Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.330364 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:22 crc kubenswrapper[4877]: E0128 16:36:22.330556 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.371137 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.371204 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.371226 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.371251 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.371272 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:22Z","lastTransitionTime":"2026-01-28T16:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.474206 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.474740 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.474927 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.475097 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.475248 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:22Z","lastTransitionTime":"2026-01-28T16:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.579235 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.579312 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.579330 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.579362 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.579381 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:22Z","lastTransitionTime":"2026-01-28T16:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.682549 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.682605 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.682617 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.682639 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.682653 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:22Z","lastTransitionTime":"2026-01-28T16:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.786323 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.786384 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.786402 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.786426 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.786444 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:22Z","lastTransitionTime":"2026-01-28T16:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.891102 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.891177 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.891193 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.891221 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.891239 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:22Z","lastTransitionTime":"2026-01-28T16:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.994459 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.994536 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.994550 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.994570 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:22 crc kubenswrapper[4877]: I0128 16:36:22.994583 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:22Z","lastTransitionTime":"2026-01-28T16:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.097633 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.097702 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.097723 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.097748 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.097766 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:23Z","lastTransitionTime":"2026-01-28T16:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.201371 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.201467 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.201540 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.201575 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.201596 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:23Z","lastTransitionTime":"2026-01-28T16:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.305068 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.305117 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.305125 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.305141 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.305152 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:23Z","lastTransitionTime":"2026-01-28T16:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.324931 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 01:44:11.364980674 +0000 UTC Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.330246 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.330309 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.330248 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:23 crc kubenswrapper[4877]: E0128 16:36:23.330433 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:23 crc kubenswrapper[4877]: E0128 16:36:23.330675 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:23 crc kubenswrapper[4877]: E0128 16:36:23.330806 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.408936 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.409008 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.409022 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.409045 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.409060 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:23Z","lastTransitionTime":"2026-01-28T16:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.512300 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.512396 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.512424 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.512457 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.512527 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:23Z","lastTransitionTime":"2026-01-28T16:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.617396 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.617463 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.617514 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.617543 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.617563 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:23Z","lastTransitionTime":"2026-01-28T16:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.721038 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.721510 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.721523 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.721540 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.721552 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:23Z","lastTransitionTime":"2026-01-28T16:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.825129 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.825190 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.825207 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.825232 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.825247 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:23Z","lastTransitionTime":"2026-01-28T16:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.928443 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.928543 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.928561 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.928587 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:23 crc kubenswrapper[4877]: I0128 16:36:23.928606 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:23Z","lastTransitionTime":"2026-01-28T16:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.031759 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.031842 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.031866 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.031897 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.031923 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:24Z","lastTransitionTime":"2026-01-28T16:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.135030 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.135084 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.135100 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.135126 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.135143 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:24Z","lastTransitionTime":"2026-01-28T16:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.239761 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.239833 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.239859 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.239893 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.239916 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:24Z","lastTransitionTime":"2026-01-28T16:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.325930 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 04:18:04.807516192 +0000 UTC Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.330424 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:24 crc kubenswrapper[4877]: E0128 16:36:24.330703 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.344288 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.344358 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.344378 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.344413 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.344446 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:24Z","lastTransitionTime":"2026-01-28T16:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.400650 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.400712 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.400733 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.400764 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.400784 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:24Z","lastTransitionTime":"2026-01-28T16:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:24 crc kubenswrapper[4877]: E0128 16:36:24.418128 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.423780 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.423847 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.423871 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.423904 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.423932 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:24Z","lastTransitionTime":"2026-01-28T16:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:24 crc kubenswrapper[4877]: E0128 16:36:24.441609 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.449459 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.449569 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.449590 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.449621 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.449669 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:24Z","lastTransitionTime":"2026-01-28T16:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:24 crc kubenswrapper[4877]: E0128 16:36:24.466142 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.473012 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.473078 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.473095 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.473122 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.473141 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:24Z","lastTransitionTime":"2026-01-28T16:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:24 crc kubenswrapper[4877]: E0128 16:36:24.495326 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.501556 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.501632 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.501655 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.501688 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.501708 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:24Z","lastTransitionTime":"2026-01-28T16:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:24 crc kubenswrapper[4877]: E0128 16:36:24.523203 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:24Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:24 crc kubenswrapper[4877]: E0128 16:36:24.523582 4877 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.525708 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.525781 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.525806 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.525839 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.525865 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:24Z","lastTransitionTime":"2026-01-28T16:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.628343 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.628403 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.628421 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.628446 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.628465 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:24Z","lastTransitionTime":"2026-01-28T16:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.731826 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.731900 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.731929 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.731959 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.731981 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:24Z","lastTransitionTime":"2026-01-28T16:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.835500 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.835588 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.835605 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.835630 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.835647 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:24Z","lastTransitionTime":"2026-01-28T16:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.938548 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.938625 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.938637 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.938679 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:24 crc kubenswrapper[4877]: I0128 16:36:24.938693 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:24Z","lastTransitionTime":"2026-01-28T16:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.042676 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.042776 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.042812 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.042841 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.042859 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:25Z","lastTransitionTime":"2026-01-28T16:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
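
The Ready=False condition repeated above will persist until the container runtime reports NetworkReady=true, which happens only once a CNI configuration file appears in /etc/kubernetes/cni/net.d/; on an OVN-Kubernetes cluster such as this one that file is normally written by the ovnkube-node pod once its ovnkube-controller container comes up. A small sketch of the check, mirroring (as an assumption, not the runtime's actual code) what libcni-based runtimes look for in that directory:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Directory named in the kubelet message above.
	dir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Println("cannot read", dir, "->", err)
		return
	}
	found := false
	for _, e := range entries {
		// libcni accepts .conf, .conflist and .json network configs.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("CNI config present:", e.Name())
			found = true
		}
	}
	if !found {
		fmt.Println("no CNI configuration file in", dir, "- node will stay NotReady")
	}
}
```
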
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.146261 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.146322 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.146339 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.146366 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.146386 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:25Z","lastTransitionTime":"2026-01-28T16:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.250387 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.250452 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.250467 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.250523 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.250540 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:25Z","lastTransitionTime":"2026-01-28T16:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.326505 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 00:28:11.371047521 +0000 UTC
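
The certificate_manager.go line above comes from the kubelet's client-go certificate manager. The rotation deadline it prints is a randomized point late in the certificate's validity window and is recomputed on every pass, which is why consecutive passes log different deadlines (2025-11-25 here, then 2025-12-12 and 2025-11-07 further down); all of them already lie in the past at 2026-01-28, so rotation is due immediately. A sketch that approximates the computation; the 70-90% jitter range and the issuance time are assumptions for illustration, not a copy of client-go's source:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a jittered point in the tail of the validity window,
// so each call yields a different deadline, matching the varying values logged.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z")
	notBefore := notAfter.Add(-365 * 24 * time.Hour) // issuance time is an assumption
	for i := 0; i < 3; i++ {
		fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
	}
}
```
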
pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.330361 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:25 crc kubenswrapper[4877]: E0128 16:36:25.330587 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:25 crc kubenswrapper[4877]: E0128 16:36:25.330820 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.354066 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.354127 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.354147 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.354175 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.354193 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:25Z","lastTransitionTime":"2026-01-28T16:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.354066 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.354127 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.354147 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.354175 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.354193 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:25Z","lastTransitionTime":"2026-01-28T16:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.458266 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.458389 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.458408 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.458520 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.458544 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:25Z","lastTransitionTime":"2026-01-28T16:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.563018 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.563061 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.563072 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.563089 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.563101 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:25Z","lastTransitionTime":"2026-01-28T16:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.666587 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.666653 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.666670 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.666697 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.666721 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:25Z","lastTransitionTime":"2026-01-28T16:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.771762 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.771860 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.771889 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.771925 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.771952 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:25Z","lastTransitionTime":"2026-01-28T16:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.876721 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.876779 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.876791 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.876814 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.876829 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:25Z","lastTransitionTime":"2026-01-28T16:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.980178 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.980251 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.980270 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.980292 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:25 crc kubenswrapper[4877]: I0128 16:36:25.980316 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:25Z","lastTransitionTime":"2026-01-28T16:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.083447 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.083558 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.083580 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.083607 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.083627 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:26Z","lastTransitionTime":"2026-01-28T16:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.187633 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.187752 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.187771 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.187804 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.187860 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:26Z","lastTransitionTime":"2026-01-28T16:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.291932 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.292023 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.292049 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.292083 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.292104 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:26Z","lastTransitionTime":"2026-01-28T16:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.326902 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 17:34:43.439204556 +0000 UTC Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.330239 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:26 crc kubenswrapper[4877]: E0128 16:36:26.330409 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.395946 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.396027 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.396051 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.396079 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.396125 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:26Z","lastTransitionTime":"2026-01-28T16:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.500085 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.500169 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.500193 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.500228 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.500249 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:26Z","lastTransitionTime":"2026-01-28T16:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.603856 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.604080 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.604104 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.604161 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.604179 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:26Z","lastTransitionTime":"2026-01-28T16:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.707790 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.707864 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.707879 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.707909 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.707924 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:26Z","lastTransitionTime":"2026-01-28T16:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.811943 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.812007 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.812021 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.812045 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.812058 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:26Z","lastTransitionTime":"2026-01-28T16:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.916162 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.916242 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.916266 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.916300 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:26 crc kubenswrapper[4877]: I0128 16:36:26.916325 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:26Z","lastTransitionTime":"2026-01-28T16:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.019883 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.019959 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.019978 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.020004 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.020023 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:27Z","lastTransitionTime":"2026-01-28T16:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.123684 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.123893 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.123930 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.124013 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.124038 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:27Z","lastTransitionTime":"2026-01-28T16:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.226725 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.226779 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.226794 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.226817 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.226832 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:27Z","lastTransitionTime":"2026-01-28T16:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.327927 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 12:32:33.126837246 +0000 UTC
Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.329906 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.330020 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.330169 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:36:27 crc kubenswrapper[4877]: E0128 16:36:27.330178 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
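
The pod_workers errors here are followed shortly by status_manager.go:875 failures that hit the same expired certificate, this time through the pod.network-node-identity.openshift.io webhook, so pod status patches are rejected as well and the API server's view of these pods goes stale. The exact verifier error string from those entries ("x509: certificate has expired or is not yet valid") can be reproduced with a throwaway self-signed certificate whose validity window has already ended:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"log"
	"math/big"
	"time"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	// Self-signed certificate whose validity window already ended, standing in
	// for the webhook serving cert that expired on 2025-08-24.
	tmpl := x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "expired-webhook-test"},
		NotBefore:             time.Now().Add(-48 * time.Hour),
		NotAfter:              time.Now().Add(-24 * time.Hour),
		IsCA:                  true,
		BasicConstraintsValid: true,
	}
	der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	if err != nil {
		log.Fatal(err)
	}
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		log.Fatal(err)
	}
	roots := x509.NewCertPool()
	roots.AddCert(cert)
	if _, err := cert.Verify(x509.VerifyOptions{Roots: roots}); err != nil {
		fmt.Println(err) // x509: certificate has expired or is not yet valid: ...
	}
}
```
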
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.330345 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.330381 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.330399 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.330421 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:27 crc kubenswrapper[4877]: E0128 16:36:27.330433 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.330438 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:27Z","lastTransitionTime":"2026-01-28T16:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:27 crc kubenswrapper[4877]: E0128 16:36:27.330604 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.351227 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-re
sources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.370651 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"911ece97-f301-4880-b308-d23e6f42071b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d24a6a68dfc0269608134b70620f1fc2ed73ea509b8f0152948ec8961679bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://617e761209376efc1375de420ec95948c4d78d70a0b577357ad157c85d23cab0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eebc783d401483d227acf6a4fa2854c410d9e195bc3cf1a968fb0eec540187d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.387757 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.420295 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea898e1c900189816c5818f182ae3d2e34b4c665
5591f4f30d6a70d60349dc7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:36:20Z\\\",\\\"message\\\":\\\"ssionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.5.239],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI0128 16:36:20.381942 6945 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0128 16:36:20.382348 6945 lb_config.go:1031] Cluster endpoints for openshift-operator-lifecycle-manager/packageserver-service for network=default are: map[]\\\\nI0128 16:36:20.382361 6945 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI0128 16:36:20.382371 6945 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI0128 16:36:20.382377 6945 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0128 16:36:20.382369 6945 services_controller.go:443] Built service openshift-operator-lifecycle-manager/packageserver-service LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.4.153\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:5443, clusterEndpoints:services.lbEndpo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:36:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.433629 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.433700 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.433732 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.433778 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.433803 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:27Z","lastTransitionTime":"2026-01-28T16:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.455314 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4e02072-0a0b-46f8-bfd2-e280f3176882\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://03c82c3e6c52f08c4d2bf0308e9d1088a27023c2ec8ce0a0c7c163977c0b6b8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d01c6e8f334b7eb9b465bca94f19011489c9a5c9b1180110b3d88ab6e5a4c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456daa5d52777cc4d3373b3d6a540879c49cab8179b3d976f22a38ab529fe7ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://360cd59a83a45618daa042cda86ec261bbd3065748ae9f6a6207a5eff1b896e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f9373257785e093cc79ea2af752b21c428cb5c15d85f41ad397a6f113404770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17a0bf5e39e09c802a625664b38f63783e7ab4dc6f3a4ab98a17a5c55001bf74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a0bf5e39e09c802a625664b38f63783e7ab4dc6f3a4ab98a17a5c55001bf74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a0399f6e62c385793232da3b37a8f787f2e1dbb26fbd385b55c30e20a8c07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://37a0399f6e62c385793232da3b37a8f787f2e1dbb26fbd385b55c30e20a8c07f\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://502420ec6fa5e2564a8a08c4f8d9ed6ffc0cd0dbf1aac60a4cc075219b9e5a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://502420ec6fa5e2564a8a08c4f8d9ed6ffc0cd0dbf1aac60a4cc075219b9e5a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.495187 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.517833 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.537730 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.537812 4877 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.537836 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.537873 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.537895 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:27Z","lastTransitionTime":"2026-01-28T16:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.539313 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.559224 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.578635 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.593753 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.609651 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 
16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.634959 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.640852 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.640902 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.640912 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.640932 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.640945 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:27Z","lastTransitionTime":"2026-01-28T16:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.652804 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.664054 4877 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.677683 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42eb9c17-72db-47fe-bc43-6ab0b3f10a33\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://363e810ae2dd4b41a9ba40fde0270f216d292d9b1d4f31191304a846ac176245\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc02683c4b4816b09a9eb418fda2e33726ff56af4fb7dc2c830559feaf3a6d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc02683c4b4816b09a9eb418fda2e33726ff56af4fb7dc2c830559feaf3a6d84\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.693407 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.707197 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36b4d387fb9ff0faec293faf540dca479867d1b5f5df34cdfad39d1e62348033\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:36:08Z\\\",\\\"message\\\":\\\"2026-01-28T16:35:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6b3db6a1-f9a2-4dbe-823e-95fad494a0fc\\\\n2026-01-28T16:35:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6b3db6a1-f9a2-4dbe-823e-95fad494a0fc to /host/opt/cni/bin/\\\\n2026-01-28T16:35:23Z [verbose] multus-daemon started\\\\n2026-01-28T16:35:23Z [verbose] Readiness Indicator file check\\\\n2026-01-28T16:36:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:36:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.717640 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:27Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.744934 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.745011 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.745033 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.745154 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.745192 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:27Z","lastTransitionTime":"2026-01-28T16:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.848401 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.848471 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.848516 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.848725 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.848741 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:27Z","lastTransitionTime":"2026-01-28T16:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.951939 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.952002 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.952018 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.952044 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:27 crc kubenswrapper[4877]: I0128 16:36:27.952059 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:27Z","lastTransitionTime":"2026-01-28T16:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.055636 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.055692 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.055701 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.055718 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.055729 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:28Z","lastTransitionTime":"2026-01-28T16:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.158205 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.158279 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.158302 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.158333 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.158349 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:28Z","lastTransitionTime":"2026-01-28T16:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.260591 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.260662 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.260685 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.260718 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.260746 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:28Z","lastTransitionTime":"2026-01-28T16:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.328816 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 19:34:42.729269336 +0000 UTC Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.331394 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:28 crc kubenswrapper[4877]: E0128 16:36:28.331768 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.364004 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.364081 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.364102 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.364135 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.364155 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:28Z","lastTransitionTime":"2026-01-28T16:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.467015 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.467111 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.467136 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.467170 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.467194 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:28Z","lastTransitionTime":"2026-01-28T16:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.571786 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.571856 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.571874 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.571902 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.571924 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:28Z","lastTransitionTime":"2026-01-28T16:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.676331 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.676392 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.676403 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.676426 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.676439 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:28Z","lastTransitionTime":"2026-01-28T16:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.780568 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.780620 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.780632 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.780651 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.780667 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:28Z","lastTransitionTime":"2026-01-28T16:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.884343 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.884402 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.884422 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.884453 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.884506 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:28Z","lastTransitionTime":"2026-01-28T16:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.988213 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.988278 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.988296 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.988318 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:28 crc kubenswrapper[4877]: I0128 16:36:28.988363 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:28Z","lastTransitionTime":"2026-01-28T16:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.092148 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.092215 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.092233 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.092262 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.092287 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:29Z","lastTransitionTime":"2026-01-28T16:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.241940 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.242026 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.242052 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.242080 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.242100 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:29Z","lastTransitionTime":"2026-01-28T16:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.329283 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 09:30:20.248495451 +0000 UTC Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.329679 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:29 crc kubenswrapper[4877]: E0128 16:36:29.330101 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.330176 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:29 crc kubenswrapper[4877]: E0128 16:36:29.330401 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.330437 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:29 crc kubenswrapper[4877]: E0128 16:36:29.330838 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
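The Ready=False condition that repeats above reduces to a single filesystem check: the container runtime reports NetworkReady=false because it finds no CNI configuration file in /etc/kubernetes/cni/net.d/. A minimal sketch of that gate, assuming the directory named in the log; the real libcni loader additionally parses and validates each file rather than just matching extensions:

```go
// Sketch: "is there at least one CNI config file?", the readiness gate
// behind the repeated KubeletNotReady conditions above.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Println("cannot read conf dir:", err)
		return
	}
	found := false
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions libcni accepts
			fmt.Println("CNI config:", e.Name())
			found = true
		}
	}
	if !found {
		fmt.Println("no CNI configuration file; node stays NotReady")
	}
}
```

On this node the file is expected to be written by ovn-kubernetes (the multus restart earlier in the log was waiting on 10-ovn-kubernetes.conf), so the node flaps NotReady until that component recovers.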
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.344922 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.344973 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.344991 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.345016 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.345033 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:29Z","lastTransitionTime":"2026-01-28T16:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.448936 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.448993 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.449007 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.449030 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.449043 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:29Z","lastTransitionTime":"2026-01-28T16:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.552929 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.553002 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.553023 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.553050 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.553070 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:29Z","lastTransitionTime":"2026-01-28T16:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.656351 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.656422 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.656440 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.656466 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.656540 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:29Z","lastTransitionTime":"2026-01-28T16:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.767145 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.767225 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.767245 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.767279 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.767337 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:29Z","lastTransitionTime":"2026-01-28T16:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.872375 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.872443 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.872461 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.872518 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.872542 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:29Z","lastTransitionTime":"2026-01-28T16:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.976701 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.976784 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.976803 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.976833 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:29 crc kubenswrapper[4877]: I0128 16:36:29.976855 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:29Z","lastTransitionTime":"2026-01-28T16:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.080728 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.080798 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.080817 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.080845 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.080862 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:30Z","lastTransitionTime":"2026-01-28T16:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.184602 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.184679 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.184701 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.184733 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.184754 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:30Z","lastTransitionTime":"2026-01-28T16:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.289256 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.289342 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.289369 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.289413 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.289438 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:30Z","lastTransitionTime":"2026-01-28T16:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.329844 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 12:39:31.636644439 +0000 UTC Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.329929 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:30 crc kubenswrapper[4877]: E0128 16:36:30.330208 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
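The certificate_manager lines interleaved above report rotation deadlines (2026-01-04, 2025-11-07, 2025-11-10) that are already behind the log's clock of 2026-01-28, so the kubelet keeps re-evaluating and attempting rotation of its serving certificate on every pass. A sketch of how such a deadline can land in the past: the manager picks a jittered point late in the certificate's validity window, and once wall-clock time passes it, rotation is due immediately. The 70% to 90% range below is illustrative, not the exact client-go constants, and the issue time is assumed from the logged expiry:

```go
// Sketch: a jittered rotation deadline computed as a fraction of the
// validity window, mirroring the shape of the logged deadlines.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	frac := 0.7 + 0.2*rand.Float64() // illustrative jitter range
	return notBefore.Add(time.Duration(float64(total) * frac))
}

func main() {
	notBefore := time.Date(2025, 2, 24, 5, 53, 3, 0, time.UTC) // assumed issue time
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)  // expiry from the log
	deadline := rotationDeadline(notBefore, notAfter)
	now := time.Date(2026, 1, 28, 16, 36, 30, 0, time.UTC) // log timestamp
	fmt.Println("rotation deadline:", deadline.UTC())
	fmt.Println("past due:", now.After(deadline))
}
```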
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.393654 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.393737 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.393763 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.393794 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.393814 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:30Z","lastTransitionTime":"2026-01-28T16:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.497451 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.497546 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.497564 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.497591 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.497610 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:30Z","lastTransitionTime":"2026-01-28T16:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.600679 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.600766 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.600784 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.600812 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.600829 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:30Z","lastTransitionTime":"2026-01-28T16:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.704857 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.705192 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.705281 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.705426 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.705543 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:30Z","lastTransitionTime":"2026-01-28T16:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.808460 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.808560 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.808611 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.808641 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.808655 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:30Z","lastTransitionTime":"2026-01-28T16:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.912119 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.912217 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.912242 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.912279 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:30 crc kubenswrapper[4877]: I0128 16:36:30.912304 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:30Z","lastTransitionTime":"2026-01-28T16:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.015915 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.015989 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.016002 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.016017 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.016030 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:31Z","lastTransitionTime":"2026-01-28T16:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.119707 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.119763 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.119776 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.119797 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.119811 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:31Z","lastTransitionTime":"2026-01-28T16:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.222258 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.222324 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.222346 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.222375 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.222397 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:31Z","lastTransitionTime":"2026-01-28T16:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.325122 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.325200 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.325222 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.325255 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.325276 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:31Z","lastTransitionTime":"2026-01-28T16:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.329590 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.329622 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.329649 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:31 crc kubenswrapper[4877]: E0128 16:36:31.329743 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:31 crc kubenswrapper[4877]: E0128 16:36:31.329972 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.330029 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 11:23:49.056310773 +0000 UTC Jan 28 16:36:31 crc kubenswrapper[4877]: E0128 16:36:31.330215 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.428349 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.428421 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.428440 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.428469 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.428526 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:31Z","lastTransitionTime":"2026-01-28T16:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.532088 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.532146 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.532162 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.532189 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.532211 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:31Z","lastTransitionTime":"2026-01-28T16:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.636012 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.636148 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.636174 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.636243 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.636272 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:31Z","lastTransitionTime":"2026-01-28T16:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.741354 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.741549 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.741573 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.741670 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.741696 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:31Z","lastTransitionTime":"2026-01-28T16:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.846420 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.846540 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.846589 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.846621 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.846647 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:31Z","lastTransitionTime":"2026-01-28T16:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.950090 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.950159 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.950178 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.950205 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:31 crc kubenswrapper[4877]: I0128 16:36:31.950224 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:31Z","lastTransitionTime":"2026-01-28T16:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.052812 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.052888 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.052908 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.052938 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.052959 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:32Z","lastTransitionTime":"2026-01-28T16:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.157201 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.157280 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.157304 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.157332 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.157352 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:32Z","lastTransitionTime":"2026-01-28T16:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.261661 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.261767 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.261785 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.261818 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.261839 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:32Z","lastTransitionTime":"2026-01-28T16:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.330047 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:32 crc kubenswrapper[4877]: E0128 16:36:32.330283 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.330454 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 08:56:27.655529241 +0000 UTC Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.365368 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.365442 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.365469 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.365556 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.365600 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:32Z","lastTransitionTime":"2026-01-28T16:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.469776 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.469849 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.469873 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.469904 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.469924 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:32Z","lastTransitionTime":"2026-01-28T16:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.573301 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.573397 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.573434 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.573464 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.573526 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:32Z","lastTransitionTime":"2026-01-28T16:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.677439 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.677558 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.677574 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.677598 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.677612 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:32Z","lastTransitionTime":"2026-01-28T16:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.783562 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.783627 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.783652 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.783679 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.783695 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:32Z","lastTransitionTime":"2026-01-28T16:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.887532 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.887621 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.887640 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.887671 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.887692 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:32Z","lastTransitionTime":"2026-01-28T16:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.991844 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.991919 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.991938 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.991965 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:32 crc kubenswrapper[4877]: I0128 16:36:32.991984 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:32Z","lastTransitionTime":"2026-01-28T16:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.095110 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.095382 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.095465 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.095564 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.095625 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:33Z","lastTransitionTime":"2026-01-28T16:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.199139 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.199193 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.199205 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.199225 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.199238 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:33Z","lastTransitionTime":"2026-01-28T16:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.302554 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.302592 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.302600 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.302617 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.302628 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:33Z","lastTransitionTime":"2026-01-28T16:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.330043 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:33 crc kubenswrapper[4877]: E0128 16:36:33.330238 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.330264 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.330068 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:33 crc kubenswrapper[4877]: E0128 16:36:33.330369 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:33 crc kubenswrapper[4877]: E0128 16:36:33.330745 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.330754 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 16:12:29.116125761 +0000 UTC Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.404939 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.404976 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.404985 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.405001 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.405010 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:33Z","lastTransitionTime":"2026-01-28T16:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.508688 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.508960 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.508985 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.509021 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.509046 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:33Z","lastTransitionTime":"2026-01-28T16:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.613923 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.614014 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.614038 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.614074 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.614101 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:33Z","lastTransitionTime":"2026-01-28T16:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.718534 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.718627 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.718694 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.718731 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.718759 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:33Z","lastTransitionTime":"2026-01-28T16:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.822027 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.822130 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.822158 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.822194 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.822220 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:33Z","lastTransitionTime":"2026-01-28T16:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.926233 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.926333 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.926357 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.926399 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:33 crc kubenswrapper[4877]: I0128 16:36:33.926429 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:33Z","lastTransitionTime":"2026-01-28T16:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.029492 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.029563 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.029572 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.029594 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.029605 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:34Z","lastTransitionTime":"2026-01-28T16:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.132499 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.132538 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.132547 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.132564 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.132579 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:34Z","lastTransitionTime":"2026-01-28T16:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.235411 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.235518 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.235543 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.235576 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.235603 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:34Z","lastTransitionTime":"2026-01-28T16:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.329522 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:34 crc kubenswrapper[4877]: E0128 16:36:34.329680 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.330877 4877 scope.go:117] "RemoveContainer" containerID="ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.331191 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 11:51:08.451618648 +0000 UTC Jan 28 16:36:34 crc kubenswrapper[4877]: E0128 16:36:34.331190 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.338641 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.338691 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.338707 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.338733 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.338752 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:34Z","lastTransitionTime":"2026-01-28T16:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.442699 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.442779 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.442802 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.442835 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.442862 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:34Z","lastTransitionTime":"2026-01-28T16:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.546931 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.546992 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.547012 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.547040 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.547060 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:34Z","lastTransitionTime":"2026-01-28T16:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.650397 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.650556 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.650599 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.650641 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.650664 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:34Z","lastTransitionTime":"2026-01-28T16:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.754118 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.754188 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.754212 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.754245 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.754270 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:34Z","lastTransitionTime":"2026-01-28T16:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.858041 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.858117 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.858141 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.858178 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.858203 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:34Z","lastTransitionTime":"2026-01-28T16:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.920136 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.920210 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.920230 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.920260 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.920284 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:34Z","lastTransitionTime":"2026-01-28T16:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:34 crc kubenswrapper[4877]: E0128 16:36:34.940181 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.946711 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.946792 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.946812 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.946841 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.946862 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:34Z","lastTransitionTime":"2026-01-28T16:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:34 crc kubenswrapper[4877]: E0128 16:36:34.971558 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.978340 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.978442 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.978470 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.978555 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:34 crc kubenswrapper[4877]: I0128 16:36:34.978580 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:34Z","lastTransitionTime":"2026-01-28T16:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:35 crc kubenswrapper[4877]: E0128 16:36:35.002287 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:34Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.007815 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.007869 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.007886 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.007912 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.007927 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:35Z","lastTransitionTime":"2026-01-28T16:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:35 crc kubenswrapper[4877]: E0128 16:36:35.024153 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.030516 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.030573 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.030586 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.030627 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.030641 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:35Z","lastTransitionTime":"2026-01-28T16:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:35 crc kubenswrapper[4877]: E0128 16:36:35.047181 4877 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T16:36:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0cc15e1d-6789-454c-9d3c-a9ac2a07dbf6\\\",\\\"systemUUID\\\":\\\"4dc3ada7-a2e1-4d9e-b4ad-be7988efc75f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:35Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:35 crc kubenswrapper[4877]: E0128 16:36:35.047420 4877 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.049970 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.050013 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.050026 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.050041 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.050052 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:35Z","lastTransitionTime":"2026-01-28T16:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.152355 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.152418 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.152430 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.152530 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.152549 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:35Z","lastTransitionTime":"2026-01-28T16:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.256454 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.256553 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.256566 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.256586 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.256599 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:35Z","lastTransitionTime":"2026-01-28T16:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.329853 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.329944 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.330016 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:35 crc kubenswrapper[4877]: E0128 16:36:35.330179 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:35 crc kubenswrapper[4877]: E0128 16:36:35.330381 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:35 crc kubenswrapper[4877]: E0128 16:36:35.330823 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.331322 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 14:50:05.058842572 +0000 UTC Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.360663 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.360819 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.360843 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.360911 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.360932 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:35Z","lastTransitionTime":"2026-01-28T16:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.465865 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.465938 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.465955 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.465986 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.466005 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:35Z","lastTransitionTime":"2026-01-28T16:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.568694 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.568791 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.568815 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.568847 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.568866 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:35Z","lastTransitionTime":"2026-01-28T16:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.672817 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.672903 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.672928 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.672960 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.672983 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:35Z","lastTransitionTime":"2026-01-28T16:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.776988 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.777047 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.777058 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.777080 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.777096 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:35Z","lastTransitionTime":"2026-01-28T16:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.880406 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.880547 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.880569 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.880599 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.880617 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:35Z","lastTransitionTime":"2026-01-28T16:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.985190 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.985261 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.985278 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.985301 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:35 crc kubenswrapper[4877]: I0128 16:36:35.985319 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:35Z","lastTransitionTime":"2026-01-28T16:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.089055 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.089141 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.089158 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.089186 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.089205 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:36Z","lastTransitionTime":"2026-01-28T16:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.192666 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.192743 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.192761 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.192790 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.192810 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:36Z","lastTransitionTime":"2026-01-28T16:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.296747 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.296834 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.296857 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.296881 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.296899 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:36Z","lastTransitionTime":"2026-01-28T16:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.329787 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:36 crc kubenswrapper[4877]: E0128 16:36:36.329990 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.331823 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 06:04:03.367833486 +0000 UTC Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.401268 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.401363 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.401388 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.401423 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.401444 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:36Z","lastTransitionTime":"2026-01-28T16:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.504298 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.504366 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.504383 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.504409 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.504427 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:36Z","lastTransitionTime":"2026-01-28T16:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.608166 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.608257 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.608281 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.608314 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.608339 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:36Z","lastTransitionTime":"2026-01-28T16:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.712207 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.712275 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.712291 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.712316 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.712334 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:36Z","lastTransitionTime":"2026-01-28T16:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.815878 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.815942 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.815964 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.815987 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.816005 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:36Z","lastTransitionTime":"2026-01-28T16:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.920449 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.920586 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.920606 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.920640 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:36 crc kubenswrapper[4877]: I0128 16:36:36.920660 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:36Z","lastTransitionTime":"2026-01-28T16:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.023428 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.023529 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.023545 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.023569 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.023586 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:37Z","lastTransitionTime":"2026-01-28T16:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.135586 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.135664 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.135682 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.135712 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.135732 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:37Z","lastTransitionTime":"2026-01-28T16:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.239148 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.239260 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.239290 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.239323 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.239346 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:37Z","lastTransitionTime":"2026-01-28T16:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.330294 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.330294 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:37 crc kubenswrapper[4877]: E0128 16:36:37.330519 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.330540 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:37 crc kubenswrapper[4877]: E0128 16:36:37.330625 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:37 crc kubenswrapper[4877]: E0128 16:36:37.330929 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.332983 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 13:01:19.691615462 +0000 UTC Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.342457 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.342566 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.342593 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.342624 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.342648 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:37Z","lastTransitionTime":"2026-01-28T16:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.348509 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42eb9c17-72db-47fe-bc43-6ab0b3f10a33\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://363e810ae2dd4b41a9ba40fde0270f216d292d9b1d4f31191304a846ac176245\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID
\\\":\\\"cri-o://bc02683c4b4816b09a9eb418fda2e33726ff56af4fb7dc2c830559feaf3a6d84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc02683c4b4816b09a9eb418fda2e33726ff56af4fb7dc2c830559feaf3a6d84\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.367631 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.387768 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hbxsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:36:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://36b4d387fb9ff0faec293faf540dca479867d1b5f5df34cdfad39d1e62348033\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:36:08Z\\\",\\\"message\\\":\\\"2026-01-28T16:35:23+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_6b3db6a1-f9a2-4dbe-823e-95fad494a0fc\\\\n2026-01-28T16:35:23+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_6b3db6a1-f9a2-4dbe-823e-95fad494a0fc to /host/opt/cni/bin/\\\\n2026-01-28T16:35:23Z [verbose] multus-daemon started\\\\n2026-01-28T16:35:23Z [verbose] Readiness Indicator file check\\\\n2026-01-28T16:36:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:36:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fpkkj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hbxsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.405903 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-x2fwz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3568823b-ecd1-4e61-a47f-fda701dd8796\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5d4647f92a50537057bd76673487e5d1960cb2324092b2f647dd21882a520a81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mn77b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:24Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-x2fwz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.425434 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"74a7c783-d8a8-43a2-964c-2eb22d8ecc8b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ae7252203c0ab023b5760a590258dbcffc514772fd544a7f4d47d0d136f51bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://54d10395449907d45db408bef248ecf2220d700df699af88b50e309d94541938\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://41fa74f6fb95850fee15b9340e5cc1d4d683723fb95d3bc2474aec0d4f2b6aba\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.441132 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"911ece97-f301-4880-b308-d23e6f42071b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d24a6a68dfc0269608134b70620f1fc2ed73ea509b8f0152948ec8961679bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://617e761209376efc1375de420ec95948c4d78d70a0b577357ad157c85d23cab0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6eebc783d401483d227acf6a4fa2854c410d9e195bc3cf1a968fb0eec540187d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bd3e157177c14529fe4a6c707e0a9d21012e9965cb66a9340248b097b8f047\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.446202 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.446272 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.446299 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.446332 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 
16:36:37.446359 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:37Z","lastTransitionTime":"2026-01-28T16:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.457868 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95a2e787-3c51-42f8-b6fc-46b7c39ed39d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f7daea6a107d16fd08cc4158a0e0b42f74e6022ef899e810ac60b3ab2f2969f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t9hnr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6xsrm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.485590 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea898e1c900189816c5818f182ae3d2e34b4c665
5591f4f30d6a70d60349dc7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T16:36:20Z\\\",\\\"message\\\":\\\"ssionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.5.239],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI0128 16:36:20.381942 6945 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0128 16:36:20.382348 6945 lb_config.go:1031] Cluster endpoints for openshift-operator-lifecycle-manager/packageserver-service for network=default are: map[]\\\\nI0128 16:36:20.382361 6945 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI0128 16:36:20.382371 6945 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI0128 16:36:20.382377 6945 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0128 16:36:20.382369 6945 services_controller.go:443] Built service openshift-operator-lifecycle-manager/packageserver-service LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.4.153\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:5443, clusterEndpoints:services.lbEndpo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:36:19Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdz8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5gw27\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.513511 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4e02072-0a0b-46f8-bfd2-e280f3176882\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://03c82c3e6c52f08c4d2bf0308e9d1088a27023c2ec8ce0a0c7c163977c0b6b8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d01c6e8f334b7eb9b465bca
94f19011489c9a5c9b1180110b3d88ab6e5a4c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://456daa5d52777cc4d3373b3d6a540879c49cab8179b3d976f22a38ab529fe7ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://360cd59a83a45618daa042cda86ec261bbd3065748ae9f6a6207a5eff1b896e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f9373257785e093cc79ea2af752b21c428cb5c15d85f41ad397a6f113404770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17a0bf5e39e09c802a625664b38f63783e7ab4dc6f3a4ab98a17a5c55001bf74\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17a0bf5e39e09c802a625664b38f63783e7ab4dc6f3a4ab98a17a5c55001bf74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a0399f6e62c385793232da3b37a8f787f2e1dbb26fbd385b55c30e20a8c07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://37a0399f6e62c385793232da3b37a8f787f2e1dbb26fbd385b55c30e20a8c07f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://502420ec6fa5e2564a8a08c4f8d9ed6ffc0cd0dbf1aac60a4cc075219b9e5a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://502420ec6fa5e2564a8a08c4f8d9ed6ffc0cd0dbf1aac60a4cc075219b9e5a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.537886 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"930e5f2b-0289-4e2c-878b-85bd08af1049\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:34:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:34:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T16:35:11Z\\\",\\\"message\\\":\\\"W0128 16:35:00.458706 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 16:35:00.459039 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769618100 cert, and key in /tmp/serving-cert-654186649/serving-signer.crt, /tmp/serving-cert-654186649/serving-signer.key\\\\nI0128 16:35:00.701351 1 observer_polling.go:159] Starting file observer\\\\nW0128 16:35:00.705190 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 16:35:00.705356 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 16:35:00.707337 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-654186649/tls.crt::/tmp/serving-cert-654186649/tls.key\\\\\\\"\\\\nF0128 16:35:11.183771 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:00Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:34:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:34:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:34:57Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.548616 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.548763 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.548853 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.549110 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.549208 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:37Z","lastTransitionTime":"2026-01-28T16:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.560277 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0dd00b4f584e4a202227ae2e640e7141de9263c32f49800dbb55a6016ad003\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.579366 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e28e0844abff147b0f59181226d492ec33c063f549a65404bc815f599412837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.601269 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.623709 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:18Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dec63494d090a32af77fbe66ff6de812879009a37c335596cd5beb835a492ba0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aba26cda018807e1fe6a169241e10a597ccdfeff0d5543d556277e8072824b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.638292 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qn64p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"96167230-b465-4037-a8ac-23bec379d4ba\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e392f16d7ff63d50ce2ec85766b58230208a77b9eedf744e1ee1b9120778c4e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvg9l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qn64p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.652283 4877 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.652342 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.652362 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.652387 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.652406 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:37Z","lastTransitionTime":"2026-01-28T16:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.658701 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f86e157-eaeb-461b-b2e7-03c6a119c22e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ece4cb9b51c082c6e51757ef2509fd2d181bb38ed1adc94c394dca977ac30887\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94d2ae9d4553219c03c3055c77f29b76cd2572f921a4c3cd6dc33039eee32858\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rz7hn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2p4st\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.674768 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:17Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.692606 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5c6b6a7-d88e-419a-b28a-a4ae06d24576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c2ba98afcf4aa48896aeba2bdf39496b929140523c17df533122cad31e6c36f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T16:35:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b4cb5fe6f7581dfdfd007f496c73ad5c7bd5061a94e49edc0c6d79f26185de1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6bce706a98969235fc877e412c1a2b1328aeb97710e49661e332a82ba53eb2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1a6126980073ab4b999757c19beb45d69ff90a6e95d933494626ffa13e5d231\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cbfce89926d762aa524ca16117747e08fbcc824120a08450c7fe080836586703\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c8e0fa591098dd27ed17a6f41ebcaee70a779dc74d3011229e3049d3c65b64b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e1d6c0dcae26cda170320610d096990a183aa182e744621c52608e74a0dd5ff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T16:35:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T16:35:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zk6qf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:21Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gf9xn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.707581 4877 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T16:35:35Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q848q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T16:35:35Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bh9bk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T16:36:37Z is after 2025-08-24T17:21:41Z" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.755987 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.756057 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.756073 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.756100 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.756116 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:37Z","lastTransitionTime":"2026-01-28T16:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.860936 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.861032 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.861051 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.861078 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.861100 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:37Z","lastTransitionTime":"2026-01-28T16:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.964302 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.964366 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.964381 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.964408 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:37 crc kubenswrapper[4877]: I0128 16:36:37.964425 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:37Z","lastTransitionTime":"2026-01-28T16:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.068204 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.068262 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.068273 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.068295 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.068308 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:38Z","lastTransitionTime":"2026-01-28T16:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.171243 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.171320 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.171339 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.171372 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.171394 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:38Z","lastTransitionTime":"2026-01-28T16:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.275310 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.275421 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.275528 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.275559 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.275581 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:38Z","lastTransitionTime":"2026-01-28T16:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.330620 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:38 crc kubenswrapper[4877]: E0128 16:36:38.330862 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.334109 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 00:09:24.070951476 +0000 UTC Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.378940 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.379009 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.379030 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.379061 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.379083 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:38Z","lastTransitionTime":"2026-01-28T16:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.482737 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.482824 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.482910 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.482939 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.482961 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:38Z","lastTransitionTime":"2026-01-28T16:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.587164 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.587235 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.587255 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.587285 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.587309 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:38Z","lastTransitionTime":"2026-01-28T16:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.691234 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.691312 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.691342 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.691381 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.691410 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:38Z","lastTransitionTime":"2026-01-28T16:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.795066 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.795162 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.795184 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.795231 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.795265 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:38Z","lastTransitionTime":"2026-01-28T16:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.899250 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.899330 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.899349 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.899375 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:38 crc kubenswrapper[4877]: I0128 16:36:38.899395 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:38Z","lastTransitionTime":"2026-01-28T16:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.002788 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.002851 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.002863 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.002885 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.002898 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:39Z","lastTransitionTime":"2026-01-28T16:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.107838 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.107895 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.107908 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.107933 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.107951 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:39Z","lastTransitionTime":"2026-01-28T16:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.211393 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.211452 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.211468 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.211513 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.211531 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:39Z","lastTransitionTime":"2026-01-28T16:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.314567 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.314668 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.314725 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.314745 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.314757 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:39Z","lastTransitionTime":"2026-01-28T16:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.330282 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.330415 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:36:39 crc kubenswrapper[4877]: E0128 16:36:39.330711 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.330789 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:36:39 crc kubenswrapper[4877]: E0128 16:36:39.330938 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d"
Jan 28 16:36:39 crc kubenswrapper[4877]: E0128 16:36:39.331161 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.334343 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 12:16:49.32967489 +0000 UTC
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.418058 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.418125 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.418137 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.418161 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.418177 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:39Z","lastTransitionTime":"2026-01-28T16:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.524034 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.524594 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.524613 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.524647 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.524677 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:39Z","lastTransitionTime":"2026-01-28T16:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.623325 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs\") pod \"network-metrics-daemon-bh9bk\" (UID: \"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\") " pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:36:39 crc kubenswrapper[4877]: E0128 16:36:39.623681 4877 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 16:36:39 crc kubenswrapper[4877]: E0128 16:36:39.623797 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs podName:a6ea3417-5f04-4035-aaea-0dc5ad7d002d nodeName:}" failed. No retries permitted until 2026-01-28 16:37:43.623766587 +0000 UTC m=+167.182093505 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs") pod "network-metrics-daemon-bh9bk" (UID: "a6ea3417-5f04-4035-aaea-0dc5ad7d002d") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.628779 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.628833 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.628850 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.628879 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.628896 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:39Z","lastTransitionTime":"2026-01-28T16:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.733298 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.733768 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.733926 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.734099 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.734259 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:39Z","lastTransitionTime":"2026-01-28T16:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.837859 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.837939 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.837964 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.837998 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.838022 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:39Z","lastTransitionTime":"2026-01-28T16:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.941830 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.941898 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.941915 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.941940 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:39 crc kubenswrapper[4877]: I0128 16:36:39.941958 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:39Z","lastTransitionTime":"2026-01-28T16:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.045727 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.045817 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.045840 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.045877 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.045904 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:40Z","lastTransitionTime":"2026-01-28T16:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.148987 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.149070 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.149096 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.149132 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.149156 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:40Z","lastTransitionTime":"2026-01-28T16:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.253282 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.253340 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.253360 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.253385 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.253403 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:40Z","lastTransitionTime":"2026-01-28T16:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.329559 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:40 crc kubenswrapper[4877]: E0128 16:36:40.329741 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.334779 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 16:52:23.7134004 +0000 UTC Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.355939 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.356005 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.356022 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.356050 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.356073 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:40Z","lastTransitionTime":"2026-01-28T16:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.459639 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.459690 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.459700 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.459717 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.459729 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:40Z","lastTransitionTime":"2026-01-28T16:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.563149 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.563210 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.563265 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.563289 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.563307 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:40Z","lastTransitionTime":"2026-01-28T16:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.666972 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.667032 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.667045 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.667069 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.667085 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:40Z","lastTransitionTime":"2026-01-28T16:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.770447 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.770532 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.770548 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.770576 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.770594 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:40Z","lastTransitionTime":"2026-01-28T16:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.874317 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.874404 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.874422 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.874449 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.874469 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:40Z","lastTransitionTime":"2026-01-28T16:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.978109 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.978185 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.978204 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.978233 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:40 crc kubenswrapper[4877]: I0128 16:36:40.978251 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:40Z","lastTransitionTime":"2026-01-28T16:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.082123 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.082182 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.082199 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.082226 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.082246 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:41Z","lastTransitionTime":"2026-01-28T16:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.184750 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.184814 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.184830 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.184857 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.184871 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:41Z","lastTransitionTime":"2026-01-28T16:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.287980 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.288036 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.288046 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.288060 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.288070 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:41Z","lastTransitionTime":"2026-01-28T16:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.329994 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:36:41 crc kubenswrapper[4877]: E0128 16:36:41.330196 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.330521 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.330460 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:36:41 crc kubenswrapper[4877]: E0128 16:36:41.330592 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 16:36:41 crc kubenswrapper[4877]: E0128 16:36:41.330750 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.334943 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 23:37:53.285129123 +0000 UTC
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.390718 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.390788 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.390806 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.390833 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.390849 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:41Z","lastTransitionTime":"2026-01-28T16:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.493899 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.493946 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.493956 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.493975 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.493989 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:41Z","lastTransitionTime":"2026-01-28T16:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.596567 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.596654 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.596677 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.596709 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.596734 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:41Z","lastTransitionTime":"2026-01-28T16:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.700151 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.700226 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.700249 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.700280 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.700302 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:41Z","lastTransitionTime":"2026-01-28T16:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.804660 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.804748 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.804780 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.804818 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.804843 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:41Z","lastTransitionTime":"2026-01-28T16:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.908744 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.908821 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.908847 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.908884 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:41 crc kubenswrapper[4877]: I0128 16:36:41.908910 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:41Z","lastTransitionTime":"2026-01-28T16:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.011969 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.012057 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.012075 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.012102 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.012121 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:42Z","lastTransitionTime":"2026-01-28T16:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.115627 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.115680 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.115691 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.115709 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.115724 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:42Z","lastTransitionTime":"2026-01-28T16:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.219403 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.219451 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.219461 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.219502 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.219521 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:42Z","lastTransitionTime":"2026-01-28T16:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.323373 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.323439 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.323456 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.323500 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.323540 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:42Z","lastTransitionTime":"2026-01-28T16:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.330027 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 16:36:42 crc kubenswrapper[4877]: E0128 16:36:42.330204 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.335092 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 22:37:24.593926236 +0000 UTC
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.426642 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.426707 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.426719 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.426737 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.426750 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:42Z","lastTransitionTime":"2026-01-28T16:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.529702 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.529761 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.529771 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.529790 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.529802 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:42Z","lastTransitionTime":"2026-01-28T16:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.633334 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.633397 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.633410 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.633432 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.633448 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:42Z","lastTransitionTime":"2026-01-28T16:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.736937 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.737013 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.737038 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.737065 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.737083 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:42Z","lastTransitionTime":"2026-01-28T16:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.839688 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.839759 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.839772 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.839796 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.839811 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:42Z","lastTransitionTime":"2026-01-28T16:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.943000 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.943054 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.943065 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.943083 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:42 crc kubenswrapper[4877]: I0128 16:36:42.943094 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:42Z","lastTransitionTime":"2026-01-28T16:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.046870 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.046961 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.046986 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.047022 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.047051 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:43Z","lastTransitionTime":"2026-01-28T16:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.150875 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.150920 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.150929 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.150944 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.150958 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:43Z","lastTransitionTime":"2026-01-28T16:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.253280 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.253347 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.253360 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.253384 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.253399 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:43Z","lastTransitionTime":"2026-01-28T16:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.330493 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.330642 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:36:43 crc kubenswrapper[4877]: E0128 16:36:43.330711 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 16:36:43 crc kubenswrapper[4877]: E0128 16:36:43.330840 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.330506 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:36:43 crc kubenswrapper[4877]: E0128 16:36:43.330986 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d"
pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.335370 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 10:15:56.795050733 +0000 UTC Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.356303 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.356350 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.356361 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.356377 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.356388 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:43Z","lastTransitionTime":"2026-01-28T16:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.458732 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.458784 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.458797 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.458813 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.458823 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:43Z","lastTransitionTime":"2026-01-28T16:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.562265 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.562314 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.562325 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.562343 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.562354 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:43Z","lastTransitionTime":"2026-01-28T16:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.665745 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.665813 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.665831 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.665859 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.665878 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:43Z","lastTransitionTime":"2026-01-28T16:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.769526 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.769567 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.769577 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.769593 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.769603 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:43Z","lastTransitionTime":"2026-01-28T16:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.873294 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.873360 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.873379 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.873405 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.873418 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:43Z","lastTransitionTime":"2026-01-28T16:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.976361 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.976427 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.976440 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.976466 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:43 crc kubenswrapper[4877]: I0128 16:36:43.976509 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:43Z","lastTransitionTime":"2026-01-28T16:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.078881 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.078931 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.078948 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.078971 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.078983 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:44Z","lastTransitionTime":"2026-01-28T16:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.181909 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.182007 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.182045 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.182078 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.182280 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:44Z","lastTransitionTime":"2026-01-28T16:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.285788 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.285857 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.285871 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.285901 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.285916 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:44Z","lastTransitionTime":"2026-01-28T16:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.329869 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 16:36:44 crc kubenswrapper[4877]: E0128 16:36:44.330108 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.336225 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 02:11:08.785370012 +0000 UTC Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.388780 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.388822 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.388834 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.388849 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.388860 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:44Z","lastTransitionTime":"2026-01-28T16:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.491597 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.491657 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.491674 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.491696 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.491709 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:44Z","lastTransitionTime":"2026-01-28T16:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.596025 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.596081 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.596097 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.596120 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.596136 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:44Z","lastTransitionTime":"2026-01-28T16:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.700705 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.700761 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.700771 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.700789 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.700800 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:44Z","lastTransitionTime":"2026-01-28T16:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.805184 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.805257 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.805280 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.805307 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.805327 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:44Z","lastTransitionTime":"2026-01-28T16:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.909804 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.909894 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.909912 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.909940 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:44 crc kubenswrapper[4877]: I0128 16:36:44.909960 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:44Z","lastTransitionTime":"2026-01-28T16:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.018847 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.018931 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.018975 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.019010 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.019033 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:45Z","lastTransitionTime":"2026-01-28T16:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.123814 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.123880 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.123898 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.123927 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.123948 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:45Z","lastTransitionTime":"2026-01-28T16:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.227111 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.227167 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.227181 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.227201 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.227213 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:45Z","lastTransitionTime":"2026-01-28T16:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.311607 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.311681 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.311700 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.311730 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.311750 4877 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T16:36:45Z","lastTransitionTime":"2026-01-28T16:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.329583 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.329631 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:36:45 crc kubenswrapper[4877]: E0128 16:36:45.329831 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.329986 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 16:36:45 crc kubenswrapper[4877]: E0128 16:36:45.330136 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 16:36:45 crc kubenswrapper[4877]: E0128 16:36:45.330338 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.336535 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 07:48:20.499939664 +0000 UTC
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.336728 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.348795 4877 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.376534 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr"]
Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.377417 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr"
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.383222 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.383370 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.383649 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.383882 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.405137 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=83.40509589 podStartE2EDuration="1m23.40509589s" podCreationTimestamp="2026-01-28 16:35:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:36:45.404549817 +0000 UTC m=+108.962876705" watchObservedRunningTime="2026-01-28 16:36:45.40509589 +0000 UTC m=+108.963422828" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.422402 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=57.422359027 podStartE2EDuration="57.422359027s" podCreationTimestamp="2026-01-28 16:35:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:36:45.42205 +0000 UTC m=+108.980376928" watchObservedRunningTime="2026-01-28 16:36:45.422359027 +0000 UTC m=+108.980685965" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.439408 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podStartSLOduration=84.439392799 podStartE2EDuration="1m24.439392799s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:36:45.439278596 +0000 UTC m=+108.997605524" watchObservedRunningTime="2026-01-28 16:36:45.439392799 +0000 UTC m=+108.997719697" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.497703 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/145b2d2f-1ec0-48bb-8417-ad691d3d9a86-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-njrmr\" (UID: \"145b2d2f-1ec0-48bb-8417-ad691d3d9a86\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.497798 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/145b2d2f-1ec0-48bb-8417-ad691d3d9a86-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-njrmr\" (UID: \"145b2d2f-1ec0-48bb-8417-ad691d3d9a86\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 
16:36:45.497846 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/145b2d2f-1ec0-48bb-8417-ad691d3d9a86-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-njrmr\" (UID: \"145b2d2f-1ec0-48bb-8417-ad691d3d9a86\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.497898 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/145b2d2f-1ec0-48bb-8417-ad691d3d9a86-service-ca\") pod \"cluster-version-operator-5c965bbfc6-njrmr\" (UID: \"145b2d2f-1ec0-48bb-8417-ad691d3d9a86\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.497924 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/145b2d2f-1ec0-48bb-8417-ad691d3d9a86-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-njrmr\" (UID: \"145b2d2f-1ec0-48bb-8417-ad691d3d9a86\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.523783 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=27.523763146 podStartE2EDuration="27.523763146s" podCreationTimestamp="2026-01-28 16:36:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:36:45.52278589 +0000 UTC m=+109.081112778" watchObservedRunningTime="2026-01-28 16:36:45.523763146 +0000 UTC m=+109.082090034" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.550160 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=88.55012588 podStartE2EDuration="1m28.55012588s" podCreationTimestamp="2026-01-28 16:35:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:36:45.550094749 +0000 UTC m=+109.108421647" watchObservedRunningTime="2026-01-28 16:36:45.55012588 +0000 UTC m=+109.108452808" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.598842 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/145b2d2f-1ec0-48bb-8417-ad691d3d9a86-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-njrmr\" (UID: \"145b2d2f-1ec0-48bb-8417-ad691d3d9a86\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.598914 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/145b2d2f-1ec0-48bb-8417-ad691d3d9a86-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-njrmr\" (UID: \"145b2d2f-1ec0-48bb-8417-ad691d3d9a86\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.598945 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: 
\"kubernetes.io/host-path/145b2d2f-1ec0-48bb-8417-ad691d3d9a86-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-njrmr\" (UID: \"145b2d2f-1ec0-48bb-8417-ad691d3d9a86\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.598982 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/145b2d2f-1ec0-48bb-8417-ad691d3d9a86-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-njrmr\" (UID: \"145b2d2f-1ec0-48bb-8417-ad691d3d9a86\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.598998 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/145b2d2f-1ec0-48bb-8417-ad691d3d9a86-service-ca\") pod \"cluster-version-operator-5c965bbfc6-njrmr\" (UID: \"145b2d2f-1ec0-48bb-8417-ad691d3d9a86\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.599024 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/145b2d2f-1ec0-48bb-8417-ad691d3d9a86-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-njrmr\" (UID: \"145b2d2f-1ec0-48bb-8417-ad691d3d9a86\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.599042 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/145b2d2f-1ec0-48bb-8417-ad691d3d9a86-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-njrmr\" (UID: \"145b2d2f-1ec0-48bb-8417-ad691d3d9a86\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.600012 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/145b2d2f-1ec0-48bb-8417-ad691d3d9a86-service-ca\") pod \"cluster-version-operator-5c965bbfc6-njrmr\" (UID: \"145b2d2f-1ec0-48bb-8417-ad691d3d9a86\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.607087 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/145b2d2f-1ec0-48bb-8417-ad691d3d9a86-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-njrmr\" (UID: \"145b2d2f-1ec0-48bb-8417-ad691d3d9a86\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.621783 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/145b2d2f-1ec0-48bb-8417-ad691d3d9a86-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-njrmr\" (UID: \"145b2d2f-1ec0-48bb-8417-ad691d3d9a86\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.647111 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-qn64p" podStartSLOduration=84.647084452 podStartE2EDuration="1m24.647084452s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:36:45.643374406 +0000 UTC m=+109.201701314" watchObservedRunningTime="2026-01-28 16:36:45.647084452 +0000 UTC m=+109.205411340" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.658211 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2p4st" podStartSLOduration=84.65817628 podStartE2EDuration="1m24.65817628s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:36:45.657316557 +0000 UTC m=+109.215643455" watchObservedRunningTime="2026-01-28 16:36:45.65817628 +0000 UTC m=+109.216503178" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.704735 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.716570 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-gf9xn" podStartSLOduration=84.716547692 podStartE2EDuration="1m24.716547692s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:36:45.701952014 +0000 UTC m=+109.260278902" watchObservedRunningTime="2026-01-28 16:36:45.716547692 +0000 UTC m=+109.274874580" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.745886 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=27.745244606 podStartE2EDuration="27.745244606s" podCreationTimestamp="2026-01-28 16:36:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:36:45.74346373 +0000 UTC m=+109.301790618" watchObservedRunningTime="2026-01-28 16:36:45.745244606 +0000 UTC m=+109.303571504" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.780679 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-hbxsq" podStartSLOduration=84.780655404 podStartE2EDuration="1m24.780655404s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:36:45.77897656 +0000 UTC m=+109.337303458" watchObservedRunningTime="2026-01-28 16:36:45.780655404 +0000 UTC m=+109.338982292" Jan 28 16:36:45 crc kubenswrapper[4877]: I0128 16:36:45.792052 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-x2fwz" podStartSLOduration=84.792024188 podStartE2EDuration="1m24.792024188s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:36:45.790616722 +0000 UTC m=+109.348943610" watchObservedRunningTime="2026-01-28 16:36:45.792024188 +0000 UTC m=+109.350351076" Jan 28 16:36:46 crc kubenswrapper[4877]: I0128 16:36:46.027659 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" event={"ID":"145b2d2f-1ec0-48bb-8417-ad691d3d9a86","Type":"ContainerStarted","Data":"3f3977bf627a38eb56b3c67d28a03a8cffaad671e0b61f8a7a751fb5c963d92a"} Jan 28 16:36:46 crc kubenswrapper[4877]: I0128 16:36:46.027749 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" event={"ID":"145b2d2f-1ec0-48bb-8417-ad691d3d9a86","Type":"ContainerStarted","Data":"dbd62c58884e13056d1937c14b18f4ac7d5d1eed32a7699bae1a47c2a86ed257"} Jan 28 16:36:46 crc kubenswrapper[4877]: I0128 16:36:46.329836 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:46 crc kubenswrapper[4877]: E0128 16:36:46.330045 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:46 crc kubenswrapper[4877]: I0128 16:36:46.331392 4877 scope.go:117] "RemoveContainer" containerID="ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f" Jan 28 16:36:46 crc kubenswrapper[4877]: E0128 16:36:46.331794 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" Jan 28 16:36:47 crc kubenswrapper[4877]: I0128 16:36:47.330330 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:47 crc kubenswrapper[4877]: I0128 16:36:47.330388 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:47 crc kubenswrapper[4877]: I0128 16:36:47.330355 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:47 crc kubenswrapper[4877]: E0128 16:36:47.332677 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:47 crc kubenswrapper[4877]: E0128 16:36:47.332808 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:47 crc kubenswrapper[4877]: E0128 16:36:47.332854 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:48 crc kubenswrapper[4877]: I0128 16:36:48.329902 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:48 crc kubenswrapper[4877]: E0128 16:36:48.330096 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:49 crc kubenswrapper[4877]: I0128 16:36:49.329920 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:49 crc kubenswrapper[4877]: E0128 16:36:49.330105 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:49 crc kubenswrapper[4877]: I0128 16:36:49.330194 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:49 crc kubenswrapper[4877]: I0128 16:36:49.330268 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:49 crc kubenswrapper[4877]: E0128 16:36:49.330454 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:49 crc kubenswrapper[4877]: E0128 16:36:49.330716 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:50 crc kubenswrapper[4877]: I0128 16:36:50.329414 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:50 crc kubenswrapper[4877]: E0128 16:36:50.329938 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:51 crc kubenswrapper[4877]: I0128 16:36:51.330648 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:51 crc kubenswrapper[4877]: I0128 16:36:51.332050 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:51 crc kubenswrapper[4877]: I0128 16:36:51.332853 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:51 crc kubenswrapper[4877]: E0128 16:36:51.333073 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:51 crc kubenswrapper[4877]: E0128 16:36:51.333277 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:51 crc kubenswrapper[4877]: E0128 16:36:51.333613 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:52 crc kubenswrapper[4877]: I0128 16:36:52.330271 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:52 crc kubenswrapper[4877]: E0128 16:36:52.330547 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:53 crc kubenswrapper[4877]: I0128 16:36:53.329520 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:53 crc kubenswrapper[4877]: I0128 16:36:53.329613 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:53 crc kubenswrapper[4877]: I0128 16:36:53.329527 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:53 crc kubenswrapper[4877]: E0128 16:36:53.329786 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:53 crc kubenswrapper[4877]: E0128 16:36:53.329866 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:53 crc kubenswrapper[4877]: E0128 16:36:53.330048 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:54 crc kubenswrapper[4877]: I0128 16:36:54.329862 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:54 crc kubenswrapper[4877]: E0128 16:36:54.330616 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:55 crc kubenswrapper[4877]: I0128 16:36:55.329974 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:55 crc kubenswrapper[4877]: I0128 16:36:55.329974 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:55 crc kubenswrapper[4877]: I0128 16:36:55.330167 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:55 crc kubenswrapper[4877]: E0128 16:36:55.330329 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:55 crc kubenswrapper[4877]: E0128 16:36:55.330675 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:55 crc kubenswrapper[4877]: E0128 16:36:55.330847 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:56 crc kubenswrapper[4877]: I0128 16:36:56.079722 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hbxsq_2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a/kube-multus/1.log" Jan 28 16:36:56 crc kubenswrapper[4877]: I0128 16:36:56.080614 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hbxsq_2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a/kube-multus/0.log" Jan 28 16:36:56 crc kubenswrapper[4877]: I0128 16:36:56.080696 4877 generic.go:334] "Generic (PLEG): container finished" podID="2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a" containerID="36b4d387fb9ff0faec293faf540dca479867d1b5f5df34cdfad39d1e62348033" exitCode=1 Jan 28 16:36:56 crc kubenswrapper[4877]: I0128 16:36:56.080747 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hbxsq" event={"ID":"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a","Type":"ContainerDied","Data":"36b4d387fb9ff0faec293faf540dca479867d1b5f5df34cdfad39d1e62348033"} Jan 28 16:36:56 crc kubenswrapper[4877]: I0128 16:36:56.080799 4877 scope.go:117] "RemoveContainer" containerID="1da1642294a456114d2318e05d4964173bb761f38f04dce00395abd10d48638e" Jan 28 16:36:56 crc kubenswrapper[4877]: I0128 16:36:56.082857 4877 scope.go:117] "RemoveContainer" containerID="36b4d387fb9ff0faec293faf540dca479867d1b5f5df34cdfad39d1e62348033" Jan 28 16:36:56 crc kubenswrapper[4877]: E0128 16:36:56.083333 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-hbxsq_openshift-multus(2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a)\"" pod="openshift-multus/multus-hbxsq" podUID="2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a" Jan 28 16:36:56 crc kubenswrapper[4877]: I0128 16:36:56.116854 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-njrmr" podStartSLOduration=95.116770221 podStartE2EDuration="1m35.116770221s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:36:46.052727734 +0000 UTC m=+109.611054632" watchObservedRunningTime="2026-01-28 16:36:56.116770221 +0000 UTC m=+119.675097109" Jan 28 16:36:56 crc kubenswrapper[4877]: I0128 16:36:56.329555 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:56 crc kubenswrapper[4877]: E0128 16:36:56.329792 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:57 crc kubenswrapper[4877]: I0128 16:36:57.087644 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hbxsq_2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a/kube-multus/1.log" Jan 28 16:36:57 crc kubenswrapper[4877]: E0128 16:36:57.273164 4877 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Jan 28 16:36:57 crc kubenswrapper[4877]: I0128 16:36:57.330464 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:57 crc kubenswrapper[4877]: E0128 16:36:57.332401 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:57 crc kubenswrapper[4877]: I0128 16:36:57.332541 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:57 crc kubenswrapper[4877]: I0128 16:36:57.332540 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:57 crc kubenswrapper[4877]: E0128 16:36:57.332709 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:57 crc kubenswrapper[4877]: E0128 16:36:57.333323 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:36:57 crc kubenswrapper[4877]: I0128 16:36:57.334115 4877 scope.go:117] "RemoveContainer" containerID="ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f" Jan 28 16:36:57 crc kubenswrapper[4877]: E0128 16:36:57.334586 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5gw27_openshift-ovn-kubernetes(3138aa2e-dca5-4d62-aa47-1fd2b559baaf)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" Jan 28 16:36:57 crc kubenswrapper[4877]: E0128 16:36:57.480237 4877 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 16:36:58 crc kubenswrapper[4877]: I0128 16:36:58.330131 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:36:58 crc kubenswrapper[4877]: E0128 16:36:58.330667 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:36:59 crc kubenswrapper[4877]: I0128 16:36:59.330597 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:36:59 crc kubenswrapper[4877]: I0128 16:36:59.330679 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:36:59 crc kubenswrapper[4877]: E0128 16:36:59.330719 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:36:59 crc kubenswrapper[4877]: I0128 16:36:59.330820 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:36:59 crc kubenswrapper[4877]: E0128 16:36:59.330927 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:36:59 crc kubenswrapper[4877]: E0128 16:36:59.331039 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:37:00 crc kubenswrapper[4877]: I0128 16:37:00.329732 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:37:00 crc kubenswrapper[4877]: E0128 16:37:00.329875 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:37:01 crc kubenswrapper[4877]: I0128 16:37:01.330001 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:37:01 crc kubenswrapper[4877]: I0128 16:37:01.330059 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:37:01 crc kubenswrapper[4877]: I0128 16:37:01.330110 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:37:01 crc kubenswrapper[4877]: E0128 16:37:01.330228 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:37:01 crc kubenswrapper[4877]: E0128 16:37:01.330396 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:37:01 crc kubenswrapper[4877]: E0128 16:37:01.330616 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:37:02 crc kubenswrapper[4877]: I0128 16:37:02.330114 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:37:02 crc kubenswrapper[4877]: E0128 16:37:02.330285 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:37:02 crc kubenswrapper[4877]: E0128 16:37:02.481257 4877 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 16:37:03 crc kubenswrapper[4877]: I0128 16:37:03.330158 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:37:03 crc kubenswrapper[4877]: I0128 16:37:03.330209 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:37:03 crc kubenswrapper[4877]: I0128 16:37:03.330158 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:37:03 crc kubenswrapper[4877]: E0128 16:37:03.330346 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:37:03 crc kubenswrapper[4877]: E0128 16:37:03.330543 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:37:03 crc kubenswrapper[4877]: E0128 16:37:03.330679 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:37:04 crc kubenswrapper[4877]: I0128 16:37:04.330184 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:37:04 crc kubenswrapper[4877]: E0128 16:37:04.330416 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:37:05 crc kubenswrapper[4877]: I0128 16:37:05.329983 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:37:05 crc kubenswrapper[4877]: I0128 16:37:05.330088 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:37:05 crc kubenswrapper[4877]: I0128 16:37:05.330045 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:37:05 crc kubenswrapper[4877]: E0128 16:37:05.330233 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:37:05 crc kubenswrapper[4877]: E0128 16:37:05.330393 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:37:05 crc kubenswrapper[4877]: E0128 16:37:05.330556 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:37:06 crc kubenswrapper[4877]: I0128 16:37:06.330080 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:37:06 crc kubenswrapper[4877]: E0128 16:37:06.330246 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:37:07 crc kubenswrapper[4877]: I0128 16:37:07.330588 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:37:07 crc kubenswrapper[4877]: I0128 16:37:07.330666 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:37:07 crc kubenswrapper[4877]: I0128 16:37:07.330847 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:37:07 crc kubenswrapper[4877]: E0128 16:37:07.332419 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:37:07 crc kubenswrapper[4877]: E0128 16:37:07.332599 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:37:07 crc kubenswrapper[4877]: E0128 16:37:07.332779 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:37:07 crc kubenswrapper[4877]: E0128 16:37:07.482266 4877 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 16:37:08 crc kubenswrapper[4877]: I0128 16:37:08.331180 4877 scope.go:117] "RemoveContainer" containerID="ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f" Jan 28 16:37:08 crc kubenswrapper[4877]: I0128 16:37:08.331649 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:37:08 crc kubenswrapper[4877]: E0128 16:37:08.331938 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:37:09 crc kubenswrapper[4877]: I0128 16:37:09.144878 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovnkube-controller/3.log" Jan 28 16:37:09 crc kubenswrapper[4877]: I0128 16:37:09.152558 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerStarted","Data":"7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e"} Jan 28 16:37:09 crc kubenswrapper[4877]: I0128 16:37:09.153040 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:37:09 crc kubenswrapper[4877]: I0128 16:37:09.200356 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podStartSLOduration=108.200329952 podStartE2EDuration="1m48.200329952s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:09.200093536 +0000 UTC m=+132.758420464" watchObservedRunningTime="2026-01-28 16:37:09.200329952 +0000 UTC m=+132.758656840" Jan 28 16:37:09 crc kubenswrapper[4877]: I0128 16:37:09.256580 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-bh9bk"] Jan 28 16:37:09 crc kubenswrapper[4877]: I0128 16:37:09.256764 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:37:09 crc kubenswrapper[4877]: E0128 16:37:09.256930 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:37:09 crc kubenswrapper[4877]: I0128 16:37:09.330107 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:37:09 crc kubenswrapper[4877]: I0128 16:37:09.330107 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:37:09 crc kubenswrapper[4877]: E0128 16:37:09.330262 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:37:09 crc kubenswrapper[4877]: E0128 16:37:09.330317 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:37:10 crc kubenswrapper[4877]: I0128 16:37:10.330089 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:37:10 crc kubenswrapper[4877]: E0128 16:37:10.330495 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:37:10 crc kubenswrapper[4877]: I0128 16:37:10.330650 4877 scope.go:117] "RemoveContainer" containerID="36b4d387fb9ff0faec293faf540dca479867d1b5f5df34cdfad39d1e62348033" Jan 28 16:37:11 crc kubenswrapper[4877]: I0128 16:37:11.163662 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hbxsq_2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a/kube-multus/1.log" Jan 28 16:37:11 crc kubenswrapper[4877]: I0128 16:37:11.164118 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hbxsq" event={"ID":"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a","Type":"ContainerStarted","Data":"1a806e67e9fc104f5c007ae476ce9c24b6f511eb3bfb6094c15c3872b5d991f7"} Jan 28 16:37:11 crc kubenswrapper[4877]: I0128 16:37:11.330287 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:37:11 crc kubenswrapper[4877]: I0128 16:37:11.330362 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:37:11 crc kubenswrapper[4877]: E0128 16:37:11.330497 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bh9bk" podUID="a6ea3417-5f04-4035-aaea-0dc5ad7d002d" Jan 28 16:37:11 crc kubenswrapper[4877]: I0128 16:37:11.330620 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:37:11 crc kubenswrapper[4877]: E0128 16:37:11.330731 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 16:37:11 crc kubenswrapper[4877]: E0128 16:37:11.330827 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 16:37:12 crc kubenswrapper[4877]: I0128 16:37:12.329765 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:37:12 crc kubenswrapper[4877]: E0128 16:37:12.330099 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 16:37:13 crc kubenswrapper[4877]: I0128 16:37:13.330242 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:37:13 crc kubenswrapper[4877]: I0128 16:37:13.330354 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:37:13 crc kubenswrapper[4877]: I0128 16:37:13.330242 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:37:13 crc kubenswrapper[4877]: I0128 16:37:13.334007 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 28 16:37:13 crc kubenswrapper[4877]: I0128 16:37:13.334274 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 28 16:37:13 crc kubenswrapper[4877]: I0128 16:37:13.334636 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 28 16:37:13 crc kubenswrapper[4877]: I0128 16:37:13.335008 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 28 16:37:14 crc kubenswrapper[4877]: I0128 16:37:14.329969 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:37:14 crc kubenswrapper[4877]: I0128 16:37:14.333180 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 28 16:37:14 crc kubenswrapper[4877]: I0128 16:37:14.333306 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.205026 4877 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.256665 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.257388 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.257897 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.258562 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.261039 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.261309 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.261436 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.262598 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.262979 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.263208 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.265075 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.265212 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.265766 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.265866 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.267336 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-q9rx2"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.267650 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.268049 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.268841 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.269805 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.270012 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.270184 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.270525 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.270662 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xjv5z"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.271124 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.277606 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.279275 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-f5npr"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.279694 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.279796 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.280015 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.280182 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.280250 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-f5npr" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.280696 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qjwb"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.281163 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qjwb" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.285720 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.285872 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.286710 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.287801 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.288460 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-2m5lt"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.296798 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-gb5km"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.297459 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.312999 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.313192 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-gb5km" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.313250 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.313440 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.315276 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.315531 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.315718 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.315933 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.316042 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.316145 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.316552 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.316651 4877 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.316754 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.316932 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.317512 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.317597 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.315530 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.317923 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.317977 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.318337 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.318426 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.319685 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.320642 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.320835 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.321927 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.322312 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.322611 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.324371 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.324432 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.324861 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 16:37:16 crc 
kubenswrapper[4877]: I0128 16:37:16.324910 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.325057 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.330461 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.331258 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-vpzx9"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.331701 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-vpzx9" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.332164 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.332644 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.333279 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.337465 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-whfj4"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.338139 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-8n6hm"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.338652 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.338989 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.339836 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.340081 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.341116 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.341841 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.342286 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-whfj4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.342771 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.343193 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.343249 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kzc7h"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.343906 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-vbvr6"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.344219 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.344420 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.344671 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.346397 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.352250 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.353761 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.354632 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.354732 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355062 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355134 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355147 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355275 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355304 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355324 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355324 4877 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355368 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355459 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355509 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355598 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355616 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355690 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355722 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355785 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355876 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355884 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.355959 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.356035 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.356808 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.371710 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-q9rx2"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.371784 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rjg7v"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.379320 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.379831 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.380221 4877 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-machine-api"/"machine-api-operator-images" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.380390 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.380736 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.389598 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.411782 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.412319 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.412614 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.412969 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.416934 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-rjg7v" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.417130 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.418556 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4c9wm"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.418896 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4c9wm" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.420979 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.421405 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/1d38cd56-3dfc-495c-9d3e-23e78467ce65-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-8qjwb\" (UID: \"1d38cd56-3dfc-495c-9d3e-23e78467ce65\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qjwb" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.421458 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d030698d-e4b8-409b-aa4a-63fc20b94771-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-nphcd\" (UID: \"d030698d-e4b8-409b-aa4a-63fc20b94771\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.421500 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/eace4e5a-20a2-43f4-8bb7-d5ddb171de98-machine-approver-tls\") pod \"machine-approver-56656f9798-pz67p\" (UID: \"eace4e5a-20a2-43f4-8bb7-d5ddb171de98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.421810 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.422265 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.422851 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0c01a743-87ea-48f9-a8bd-69475721c4cc-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.422921 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d030698d-e4b8-409b-aa4a-63fc20b94771-config\") pod \"openshift-apiserver-operator-796bbdcf4f-nphcd\" (UID: \"d030698d-e4b8-409b-aa4a-63fc20b94771\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.422967 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-node-pullsecrets\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.423019 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-encryption-config\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.423039 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwgnj\" (UniqueName: \"kubernetes.io/projected/92f14a91-e41e-4b81-bec5-ea6cf4a3037a-kube-api-access-zwgnj\") pod \"authentication-operator-69f744f599-q9rx2\" (UID: \"92f14a91-e41e-4b81-bec5-ea6cf4a3037a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.423095 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/eace4e5a-20a2-43f4-8bb7-d5ddb171de98-auth-proxy-config\") pod \"machine-approver-56656f9798-pz67p\" (UID: \"eace4e5a-20a2-43f4-8bb7-d5ddb171de98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.423127 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.423183 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-audit\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 
16:37:16.423213 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-service-ca\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.423271 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f30a6e5-e444-46ee-8756-cac33b69c05e-client-ca\") pod \"route-controller-manager-6576b87f9c-ck764\" (UID: \"1f30a6e5-e444-46ee-8756-cac33b69c05e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.423419 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.423731 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.423758 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.423862 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.423952 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.423436 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0c01a743-87ea-48f9-a8bd-69475721c4cc-audit-policies\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424056 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424125 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0c01a743-87ea-48f9-a8bd-69475721c4cc-encryption-config\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424153 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2dwf\" (UniqueName: \"kubernetes.io/projected/1f30a6e5-e444-46ee-8756-cac33b69c05e-kube-api-access-c2dwf\") pod \"route-controller-manager-6576b87f9c-ck764\" (UID: \"1f30a6e5-e444-46ee-8756-cac33b69c05e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424101 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424181 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d1397d20-a909-4e16-a962-f0dad9942a82-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-7mnfs\" (UID: \"d1397d20-a909-4e16-a962-f0dad9942a82\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424209 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blnnh\" (UniqueName: \"kubernetes.io/projected/d030698d-e4b8-409b-aa4a-63fc20b94771-kube-api-access-blnnh\") pod \"openshift-apiserver-operator-796bbdcf4f-nphcd\" (UID: \"d030698d-e4b8-409b-aa4a-63fc20b94771\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424156 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424237 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0c01a743-87ea-48f9-a8bd-69475721c4cc-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424263 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92f14a91-e41e-4b81-bec5-ea6cf4a3037a-config\") pod \"authentication-operator-69f744f599-q9rx2\" (UID: \"92f14a91-e41e-4b81-bec5-ea6cf4a3037a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424281 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-config\") pod \"controller-manager-879f6c89f-xjv5z\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424340 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d1397d20-a909-4e16-a962-f0dad9942a82-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-7mnfs\" (UID: \"d1397d20-a909-4e16-a962-f0dad9942a82\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424370 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-trusted-ca-bundle\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424420 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvrf5\" (UniqueName: \"kubernetes.io/projected/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-kube-api-access-bvrf5\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") 
" pod="openshift-console/console-f9d7485db-f5npr" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424450 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f30a6e5-e444-46ee-8756-cac33b69c05e-serving-cert\") pod \"route-controller-manager-6576b87f9c-ck764\" (UID: \"1f30a6e5-e444-46ee-8756-cac33b69c05e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424490 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-oauth-config\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424515 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cmpd\" (UniqueName: \"kubernetes.io/projected/0c01a743-87ea-48f9-a8bd-69475721c4cc-kube-api-access-9cmpd\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424538 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-client-ca\") pod \"controller-manager-879f6c89f-xjv5z\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424564 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0c01a743-87ea-48f9-a8bd-69475721c4cc-etcd-client\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424580 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-serving-cert\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424605 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d1397d20-a909-4e16-a962-f0dad9942a82-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-7mnfs\" (UID: \"d1397d20-a909-4e16-a962-f0dad9942a82\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424623 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7725b803-8d54-401d-bb4c-4112e90ddc0b-config\") pod \"console-operator-58897d9998-gb5km\" (UID: \"7725b803-8d54-401d-bb4c-4112e90ddc0b\") " pod="openshift-console-operator/console-operator-58897d9998-gb5km" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424639 4877 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-oauth-serving-cert\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424656 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c01a743-87ea-48f9-a8bd-69475721c4cc-serving-cert\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424672 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xjv5z\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424691 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-etcd-serving-ca\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424725 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/92f14a91-e41e-4b81-bec5-ea6cf4a3037a-serving-cert\") pod \"authentication-operator-69f744f599-q9rx2\" (UID: \"92f14a91-e41e-4b81-bec5-ea6cf4a3037a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424741 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0c01a743-87ea-48f9-a8bd-69475721c4cc-audit-dir\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424763 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dw7r\" (UniqueName: \"kubernetes.io/projected/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-kube-api-access-8dw7r\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424796 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-962qh\" (UniqueName: \"kubernetes.io/projected/eace4e5a-20a2-43f4-8bb7-d5ddb171de98-kube-api-access-962qh\") pod \"machine-approver-56656f9798-pz67p\" (UID: \"eace4e5a-20a2-43f4-8bb7-d5ddb171de98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424815 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" 
(UniqueName: \"kubernetes.io/configmap/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-trusted-ca-bundle\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424833 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-audit-dir\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424851 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfdmw\" (UniqueName: \"kubernetes.io/projected/7725b803-8d54-401d-bb4c-4112e90ddc0b-kube-api-access-cfdmw\") pod \"console-operator-58897d9998-gb5km\" (UID: \"7725b803-8d54-401d-bb4c-4112e90ddc0b\") " pod="openshift-console-operator/console-operator-58897d9998-gb5km" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424867 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-image-import-ca\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424640 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.425039 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-s295n"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.424885 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-config\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.425254 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-config\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.425296 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-serving-cert\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.425343 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sl4g5\" (UniqueName: \"kubernetes.io/projected/fbd014e8-90c4-488c-88fa-b68493bebb36-kube-api-access-sl4g5\") pod \"controller-manager-879f6c89f-xjv5z\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" 
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.425368 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkgwb\" (UniqueName: \"kubernetes.io/projected/d1397d20-a909-4e16-a962-f0dad9942a82-kube-api-access-wkgwb\") pod \"cluster-image-registry-operator-dc59b4c8b-7mnfs\" (UID: \"d1397d20-a909-4e16-a962-f0dad9942a82\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.425432 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-etcd-client\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.425469 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7725b803-8d54-401d-bb4c-4112e90ddc0b-trusted-ca\") pod \"console-operator-58897d9998-gb5km\" (UID: \"7725b803-8d54-401d-bb4c-4112e90ddc0b\") " pod="openshift-console-operator/console-operator-58897d9998-gb5km" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.425515 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eace4e5a-20a2-43f4-8bb7-d5ddb171de98-config\") pod \"machine-approver-56656f9798-pz67p\" (UID: \"eace4e5a-20a2-43f4-8bb7-d5ddb171de98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.425559 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7725b803-8d54-401d-bb4c-4112e90ddc0b-serving-cert\") pod \"console-operator-58897d9998-gb5km\" (UID: \"7725b803-8d54-401d-bb4c-4112e90ddc0b\") " pod="openshift-console-operator/console-operator-58897d9998-gb5km" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.425585 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/92f14a91-e41e-4b81-bec5-ea6cf4a3037a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-q9rx2\" (UID: \"92f14a91-e41e-4b81-bec5-ea6cf4a3037a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.425601 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/92f14a91-e41e-4b81-bec5-ea6cf4a3037a-service-ca-bundle\") pod \"authentication-operator-69f744f599-q9rx2\" (UID: \"92f14a91-e41e-4b81-bec5-ea6cf4a3037a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.425644 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pssp6\" (UniqueName: \"kubernetes.io/projected/1d38cd56-3dfc-495c-9d3e-23e78467ce65-kube-api-access-pssp6\") pod \"cluster-samples-operator-665b6dd947-8qjwb\" (UID: \"1d38cd56-3dfc-495c-9d3e-23e78467ce65\") " 
pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qjwb" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.425680 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fbd014e8-90c4-488c-88fa-b68493bebb36-serving-cert\") pod \"controller-manager-879f6c89f-xjv5z\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.425789 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f30a6e5-e444-46ee-8756-cac33b69c05e-config\") pod \"route-controller-manager-6576b87f9c-ck764\" (UID: \"1f30a6e5-e444-46ee-8756-cac33b69c05e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.425908 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-s295n" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.426964 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xjv5z"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.427891 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.428745 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.429044 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-rjsfv"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.429688 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rjsfv" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.429979 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.430462 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.432316 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-gb5km"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.432357 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.434418 4877 util.go:30] "No sandbox for pod can be found. 
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.437703 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.438420 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-dxbts"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.438867 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.439148 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.439429 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.440624 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.440871 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.441228 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.441319 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.441402 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.441566 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.441636 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.441763 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.456158 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.456625 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.464549 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.465373 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.476523 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-6c6br"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.477858 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-6c6br"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.484019 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.485882 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.488003 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-2m5lt"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.488222 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.488247 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-4vk27"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.488769 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.489902 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.516492 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.516817 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-mq2n8"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.516985 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.517840 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-mq2n8"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.518854 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.519854 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526643 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f30a6e5-e444-46ee-8756-cac33b69c05e-serving-cert\") pod \"route-controller-manager-6576b87f9c-ck764\" (UID: \"1f30a6e5-e444-46ee-8756-cac33b69c05e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526692 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-oauth-config\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526710 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvrf5\" (UniqueName: \"kubernetes.io/projected/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-kube-api-access-bvrf5\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526732 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cmpd\" (UniqueName: \"kubernetes.io/projected/0c01a743-87ea-48f9-a8bd-69475721c4cc-kube-api-access-9cmpd\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526755 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-client-ca\") pod \"controller-manager-879f6c89f-xjv5z\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526772 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0c01a743-87ea-48f9-a8bd-69475721c4cc-etcd-client\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526790 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-serving-cert\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526812 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63015241-3e17-41fe-aa5d-1aa0b707970b-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-s4swd\" (UID: \"63015241-3e17-41fe-aa5d-1aa0b707970b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526835 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526854 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/fbd8dc94-00b1-4aff-a395-72702a0db6c1-tmpfs\") pod \"packageserver-d55dfcdfc-wnxxp\" (UID: \"fbd8dc94-00b1-4aff-a395-72702a0db6c1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526874 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/cbcfea3d-b7da-4e43-86cf-7c32185eb863-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4c9wm\" (UID: \"cbcfea3d-b7da-4e43-86cf-7c32185eb863\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4c9wm"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526895 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d1397d20-a909-4e16-a962-f0dad9942a82-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-7mnfs\" (UID: \"d1397d20-a909-4e16-a962-f0dad9942a82\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526912 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7725b803-8d54-401d-bb4c-4112e90ddc0b-config\") pod \"console-operator-58897d9998-gb5km\" (UID: \"7725b803-8d54-401d-bb4c-4112e90ddc0b\") " pod="openshift-console-operator/console-operator-58897d9998-gb5km"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526929 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-oauth-serving-cert\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526947 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/63015241-3e17-41fe-aa5d-1aa0b707970b-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-s4swd\" (UID: \"63015241-3e17-41fe-aa5d-1aa0b707970b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526965 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cv2pv\" (UniqueName: \"kubernetes.io/projected/fbd8dc94-00b1-4aff-a395-72702a0db6c1-kube-api-access-cv2pv\") pod \"packageserver-d55dfcdfc-wnxxp\" (UID: \"fbd8dc94-00b1-4aff-a395-72702a0db6c1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526982 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c01a743-87ea-48f9-a8bd-69475721c4cc-serving-cert\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.526998 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xjv5z\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527015 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-etcd-serving-ca\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527031 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63015241-3e17-41fe-aa5d-1aa0b707970b-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-s4swd\" (UID: \"63015241-3e17-41fe-aa5d-1aa0b707970b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527048 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-audit-policies\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527066 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1d91c2d-c142-43ae-9563-e614d1c11c82-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-cq794\" (UID: \"c1d91c2d-c142-43ae-9563-e614d1c11c82\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527085 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/92f14a91-e41e-4b81-bec5-ea6cf4a3037a-serving-cert\") pod \"authentication-operator-69f744f599-q9rx2\" (UID: \"92f14a91-e41e-4b81-bec5-ea6cf4a3037a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527103 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527126 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0c01a743-87ea-48f9-a8bd-69475721c4cc-audit-dir\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527144 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dw7r\" (UniqueName: \"kubernetes.io/projected/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-kube-api-access-8dw7r\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527161 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7zh2\" (UniqueName: \"kubernetes.io/projected/135ced37-b13a-473b-950f-c1ce9567d15f-kube-api-access-t7zh2\") pod \"migrator-59844c95c7-rjsfv\" (UID: \"135ced37-b13a-473b-950f-c1ce9567d15f\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rjsfv"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527179 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8f60588-8859-46a0-94b7-77c176b03cc2-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-fclsq\" (UID: \"c8f60588-8859-46a0-94b7-77c176b03cc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527198 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-962qh\" (UniqueName: \"kubernetes.io/projected/eace4e5a-20a2-43f4-8bb7-d5ddb171de98-kube-api-access-962qh\") pod \"machine-approver-56656f9798-pz67p\" (UID: \"eace4e5a-20a2-43f4-8bb7-d5ddb171de98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527216 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6749\" (UniqueName: \"kubernetes.io/projected/01fc6775-f774-41c4-872e-dba5e6d80e10-kube-api-access-b6749\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527231 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1d91c2d-c142-43ae-9563-e614d1c11c82-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-cq794\" (UID: \"c1d91c2d-c142-43ae-9563-e614d1c11c82\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527245 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-trusted-ca-bundle\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527261 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-audit-dir\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527280 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfdmw\" (UniqueName: \"kubernetes.io/projected/7725b803-8d54-401d-bb4c-4112e90ddc0b-kube-api-access-cfdmw\") pod \"console-operator-58897d9998-gb5km\" (UID: \"7725b803-8d54-401d-bb4c-4112e90ddc0b\") " pod="openshift-console-operator/console-operator-58897d9998-gb5km"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527297 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-config\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527314 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/01fc6775-f774-41c4-872e-dba5e6d80e10-audit-dir\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527330 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527347 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-config\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527366 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-image-import-ca\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527382 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-serving-cert\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527391 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527406 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sl4g5\" (UniqueName: \"kubernetes.io/projected/fbd014e8-90c4-488c-88fa-b68493bebb36-kube-api-access-sl4g5\") pod \"controller-manager-879f6c89f-xjv5z\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527426 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkgwb\" (UniqueName: \"kubernetes.io/projected/d1397d20-a909-4e16-a962-f0dad9942a82-kube-api-access-wkgwb\") pod \"cluster-image-registry-operator-dc59b4c8b-7mnfs\" (UID: \"d1397d20-a909-4e16-a962-f0dad9942a82\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527459 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtl4r\" (UniqueName: \"kubernetes.io/projected/17940ca1-0215-4491-a9f9-9177b04180d5-kube-api-access-vtl4r\") pod \"openshift-config-operator-7777fb866f-pwhsx\" (UID: \"17940ca1-0215-4491-a9f9-9177b04180d5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527491 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527508 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-etcd-client\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527525 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eace4e5a-20a2-43f4-8bb7-d5ddb171de98-config\") pod \"machine-approver-56656f9798-pz67p\" (UID: \"eace4e5a-20a2-43f4-8bb7-d5ddb171de98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527545 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7725b803-8d54-401d-bb4c-4112e90ddc0b-trusted-ca\") pod \"console-operator-58897d9998-gb5km\" (UID: \"7725b803-8d54-401d-bb4c-4112e90ddc0b\") " pod="openshift-console-operator/console-operator-58897d9998-gb5km"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527567 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7725b803-8d54-401d-bb4c-4112e90ddc0b-serving-cert\") pod \"console-operator-58897d9998-gb5km\" (UID: \"7725b803-8d54-401d-bb4c-4112e90ddc0b\") " pod="openshift-console-operator/console-operator-58897d9998-gb5km"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527590 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b8d34290-e390-4c87-ac72-8f1142bff53c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-sljbl\" (UID: \"b8d34290-e390-4c87-ac72-8f1142bff53c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527621 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/17940ca1-0215-4491-a9f9-9177b04180d5-available-featuregates\") pod \"openshift-config-operator-7777fb866f-pwhsx\" (UID: \"17940ca1-0215-4491-a9f9-9177b04180d5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527644 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/92f14a91-e41e-4b81-bec5-ea6cf4a3037a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-q9rx2\" (UID: \"92f14a91-e41e-4b81-bec5-ea6cf4a3037a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527678 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/92f14a91-e41e-4b81-bec5-ea6cf4a3037a-service-ca-bundle\") pod \"authentication-operator-69f744f599-q9rx2\" (UID: \"92f14a91-e41e-4b81-bec5-ea6cf4a3037a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527705 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pssp6\" (UniqueName: \"kubernetes.io/projected/1d38cd56-3dfc-495c-9d3e-23e78467ce65-kube-api-access-pssp6\") pod \"cluster-samples-operator-665b6dd947-8qjwb\" (UID: \"1d38cd56-3dfc-495c-9d3e-23e78467ce65\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qjwb"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527729 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527757 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fbd014e8-90c4-488c-88fa-b68493bebb36-serving-cert\") pod \"controller-manager-879f6c89f-xjv5z\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527773 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527793 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/17940ca1-0215-4491-a9f9-9177b04180d5-serving-cert\") pod \"openshift-config-operator-7777fb866f-pwhsx\" (UID: \"17940ca1-0215-4491-a9f9-9177b04180d5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527811 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krqtv\" (UniqueName: \"kubernetes.io/projected/c8f60588-8859-46a0-94b7-77c176b03cc2-kube-api-access-krqtv\") pod \"kube-storage-version-migrator-operator-b67b599dd-fclsq\" (UID: \"c8f60588-8859-46a0-94b7-77c176b03cc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527829 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f30a6e5-e444-46ee-8756-cac33b69c05e-config\") pod \"route-controller-manager-6576b87f9c-ck764\" (UID: \"1f30a6e5-e444-46ee-8756-cac33b69c05e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527846 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2-config-volume\") pod \"dns-default-s295n\" (UID: \"2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2\") " pod="openshift-dns/dns-default-s295n"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527863 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fbd8dc94-00b1-4aff-a395-72702a0db6c1-apiservice-cert\") pod \"packageserver-d55dfcdfc-wnxxp\" (UID: \"fbd8dc94-00b1-4aff-a395-72702a0db6c1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527886 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/1d38cd56-3dfc-495c-9d3e-23e78467ce65-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-8qjwb\" (UID: \"1d38cd56-3dfc-495c-9d3e-23e78467ce65\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qjwb"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527902 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527896 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.528843 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0c01a743-87ea-48f9-a8bd-69475721c4cc-audit-dir\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.529023 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-etcd-serving-ca\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.529124 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-trusted-ca-bundle\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.529121 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xjv5z\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.529185 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-audit-dir\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.529543 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7725b803-8d54-401d-bb4c-4112e90ddc0b-config\") pod \"console-operator-58897d9998-gb5km\" (UID: \"7725b803-8d54-401d-bb4c-4112e90ddc0b\") " pod="openshift-console-operator/console-operator-58897d9998-gb5km"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.529977 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-oauth-serving-cert\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.530016 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.530143 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-config\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.530501 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-client-ca\") pod \"controller-manager-879f6c89f-xjv5z\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.530760 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d1397d20-a909-4e16-a962-f0dad9942a82-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-7mnfs\" (UID: \"d1397d20-a909-4e16-a962-f0dad9942a82\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.527920 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1d91c2d-c142-43ae-9563-e614d1c11c82-config\") pod \"kube-apiserver-operator-766d6c64bb-cq794\" (UID: \"c1d91c2d-c142-43ae-9563-e614d1c11c82\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.530837 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/eace4e5a-20a2-43f4-8bb7-d5ddb171de98-machine-approver-tls\") pod \"machine-approver-56656f9798-pz67p\" (UID: \"eace4e5a-20a2-43f4-8bb7-d5ddb171de98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.530863 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0c01a743-87ea-48f9-a8bd-69475721c4cc-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.530883 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d030698d-e4b8-409b-aa4a-63fc20b94771-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-nphcd\" (UID: \"d030698d-e4b8-409b-aa4a-63fc20b94771\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.530909 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.530939 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5mgw\" (UniqueName: \"kubernetes.io/projected/2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2-kube-api-access-v5mgw\") pod \"dns-default-s295n\" (UID: \"2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2\") " pod="openshift-dns/dns-default-s295n"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.530975 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-node-pullsecrets\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.530996 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-encryption-config\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531016 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d030698d-e4b8-409b-aa4a-63fc20b94771-config\") pod \"openshift-apiserver-operator-796bbdcf4f-nphcd\" (UID: \"d030698d-e4b8-409b-aa4a-63fc20b94771\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531022 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-image-import-ca\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531038 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwgnj\" (UniqueName: \"kubernetes.io/projected/92f14a91-e41e-4b81-bec5-ea6cf4a3037a-kube-api-access-zwgnj\") pod \"authentication-operator-69f744f599-q9rx2\" (UID: \"92f14a91-e41e-4b81-bec5-ea6cf4a3037a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531077 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/eace4e5a-20a2-43f4-8bb7-d5ddb171de98-auth-proxy-config\") pod \"machine-approver-56656f9798-pz67p\" (UID: \"eace4e5a-20a2-43f4-8bb7-d5ddb171de98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531117 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-audit\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531136 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-service-ca\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531154 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f30a6e5-e444-46ee-8756-cac33b69c05e-client-ca\") pod \"route-controller-manager-6576b87f9c-ck764\" (UID: \"1f30a6e5-e444-46ee-8756-cac33b69c05e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531176 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531228 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0c01a743-87ea-48f9-a8bd-69475721c4cc-audit-policies\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531248 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0c01a743-87ea-48f9-a8bd-69475721c4cc-encryption-config\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531277 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2dwf\" (UniqueName: \"kubernetes.io/projected/1f30a6e5-e444-46ee-8756-cac33b69c05e-kube-api-access-c2dwf\") pod \"route-controller-manager-6576b87f9c-ck764\" (UID: \"1f30a6e5-e444-46ee-8756-cac33b69c05e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531297 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2-metrics-tls\") pod \"dns-default-s295n\" (UID: \"2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2\") " pod="openshift-dns/dns-default-s295n"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531316 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blnnh\" (UniqueName: \"kubernetes.io/projected/d030698d-e4b8-409b-aa4a-63fc20b94771-kube-api-access-blnnh\") pod \"openshift-apiserver-operator-796bbdcf4f-nphcd\" (UID: \"d030698d-e4b8-409b-aa4a-63fc20b94771\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531332 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531350 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531412 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d1397d20-a909-4e16-a962-f0dad9942a82-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-7mnfs\" (UID: \"d1397d20-a909-4e16-a962-f0dad9942a82\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531432 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbmdz\" (UniqueName: \"kubernetes.io/projected/cbcfea3d-b7da-4e43-86cf-7c32185eb863-kube-api-access-kbmdz\") pod \"control-plane-machine-set-operator-78cbb6b69f-4c9wm\" (UID: \"cbcfea3d-b7da-4e43-86cf-7c32185eb863\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4c9wm"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.531453 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b8d34290-e390-4c87-ac72-8f1142bff53c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-sljbl\" (UID: \"b8d34290-e390-4c87-ac72-8f1142bff53c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.532057 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/92f14a91-e41e-4b81-bec5-ea6cf4a3037a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-q9rx2\" (UID: \"92f14a91-e41e-4b81-bec5-ea6cf4a3037a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.532497 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/92f14a91-e41e-4b81-bec5-ea6cf4a3037a-service-ca-bundle\") pod \"authentication-operator-69f744f599-q9rx2\" (UID: \"92f14a91-e41e-4b81-bec5-ea6cf4a3037a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.533552 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-whfj4"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.533596 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-f5npr"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.533607 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.535548 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92f14a91-e41e-4b81-bec5-ea6cf4a3037a-config\") pod \"authentication-operator-69f744f599-q9rx2\" (UID: \"92f14a91-e41e-4b81-bec5-ea6cf4a3037a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.535603 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0c01a743-87ea-48f9-a8bd-69475721c4cc-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.535626 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-config\") pod \"controller-manager-879f6c89f-xjv5z\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.535651 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fbd8dc94-00b1-4aff-a395-72702a0db6c1-webhook-cert\") pod \"packageserver-d55dfcdfc-wnxxp\" (UID: \"fbd8dc94-00b1-4aff-a395-72702a0db6c1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.535692 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d1397d20-a909-4e16-a962-f0dad9942a82-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-7mnfs\" (UID: \"d1397d20-a909-4e16-a962-f0dad9942a82\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.535716 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-trusted-ca-bundle\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.535745 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c8f60588-8859-46a0-94b7-77c176b03cc2-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-fclsq\" (UID: \"c8f60588-8859-46a0-94b7-77c176b03cc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.535769 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8d34290-e390-4c87-ac72-8f1142bff53c-config\") pod \"kube-controller-manager-operator-78b949d7b-sljbl\" (UID: \"b8d34290-e390-4c87-ac72-8f1142bff53c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.539247 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rjg7v"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.539298 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.539939 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0c01a743-87ea-48f9-a8bd-69475721c4cc-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.540548 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.543558 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.543873 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.543934 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qjwb"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.544052 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq"]
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.546222 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d030698d-e4b8-409b-aa4a-63fc20b94771-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-nphcd\" (UID: \"d030698d-e4b8-409b-aa4a-63fc20b94771\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.546670 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-audit\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.546898 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f30a6e5-e444-46ee-8756-cac33b69c05e-serving-cert\") pod \"route-controller-manager-6576b87f9c-ck764\" (UID: \"1f30a6e5-e444-46ee-8756-cac33b69c05e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.547725 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-config\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.548411 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/eace4e5a-20a2-43f4-8bb7-d5ddb171de98-auth-proxy-config\") pod \"machine-approver-56656f9798-pz67p\" (UID: \"eace4e5a-20a2-43f4-8bb7-d5ddb171de98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.548462 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f30a6e5-e444-46ee-8756-cac33b69c05e-client-ca\") pod \"route-controller-manager-6576b87f9c-ck764\" (UID: \"1f30a6e5-e444-46ee-8756-cac33b69c05e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.549006 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d030698d-e4b8-409b-aa4a-63fc20b94771-config\") pod \"openshift-apiserver-operator-796bbdcf4f-nphcd\" (UID: \"d030698d-e4b8-409b-aa4a-63fc20b94771\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.549056 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0c01a743-87ea-48f9-a8bd-69475721c4cc-audit-policies\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.549065 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7725b803-8d54-401d-bb4c-4112e90ddc0b-trusted-ca\") pod \"console-operator-58897d9998-gb5km\" (UID: \"7725b803-8d54-401d-bb4c-4112e90ddc0b\") " pod="openshift-console-operator/console-operator-58897d9998-gb5km"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.549071 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-node-pullsecrets\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.549608 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92f14a91-e41e-4b81-bec5-ea6cf4a3037a-config\") pod \"authentication-operator-69f744f599-q9rx2\" (UID: \"92f14a91-e41e-4b81-bec5-ea6cf4a3037a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.550588 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/92f14a91-e41e-4b81-bec5-ea6cf4a3037a-serving-cert\") pod \"authentication-operator-69f744f599-q9rx2\" (UID: \"92f14a91-e41e-4b81-bec5-ea6cf4a3037a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.547744 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-service-ca\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.550836 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/eace4e5a-20a2-43f4-8bb7-d5ddb171de98-machine-approver-tls\") pod \"machine-approver-56656f9798-pz67p\" (UID: \"eace4e5a-20a2-43f4-8bb7-d5ddb171de98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p"
\"eace4e5a-20a2-43f4-8bb7-d5ddb171de98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.551093 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0c01a743-87ea-48f9-a8bd-69475721c4cc-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.551093 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f30a6e5-e444-46ee-8756-cac33b69c05e-config\") pod \"route-controller-manager-6576b87f9c-ck764\" (UID: \"1f30a6e5-e444-46ee-8756-cac33b69c05e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.551856 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-trusted-ca-bundle\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.552022 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-config\") pod \"controller-manager-879f6c89f-xjv5z\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.552250 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0c01a743-87ea-48f9-a8bd-69475721c4cc-encryption-config\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.552310 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d1397d20-a909-4e16-a962-f0dad9942a82-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-7mnfs\" (UID: \"d1397d20-a909-4e16-a962-f0dad9942a82\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.552709 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c01a743-87ea-48f9-a8bd-69475721c4cc-serving-cert\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.552778 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-serving-cert\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.553757 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/fbd014e8-90c4-488c-88fa-b68493bebb36-serving-cert\") pod \"controller-manager-879f6c89f-xjv5z\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.553778 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-8n6hm"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.554748 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/1d38cd56-3dfc-495c-9d3e-23e78467ce65-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-8qjwb\" (UID: \"1d38cd56-3dfc-495c-9d3e-23e78467ce65\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qjwb" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.554986 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-oauth-config\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.555016 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-etcd-client\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.555088 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-serving-cert\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.555098 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0c01a743-87ea-48f9-a8bd-69475721c4cc-etcd-client\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.555278 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-6c6br"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.555540 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-encryption-config\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.555655 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7725b803-8d54-401d-bb4c-4112e90ddc0b-serving-cert\") pod \"console-operator-58897d9998-gb5km\" (UID: \"7725b803-8d54-401d-bb4c-4112e90ddc0b\") " pod="openshift-console-operator/console-operator-58897d9998-gb5km" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.558861 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.562102 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.563671 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-mq2n8"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.563714 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.564424 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.567621 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.569219 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.574164 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-tjn2b"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.574675 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eace4e5a-20a2-43f4-8bb7-d5ddb171de98-config\") pod \"machine-approver-56656f9798-pz67p\" (UID: \"eace4e5a-20a2-43f4-8bb7-d5ddb171de98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.575626 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.578634 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-hw4qp"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.579784 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kzc7h"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.579821 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-hw4qp" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.580421 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.582096 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-vpzx9"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.583148 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4c9wm"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.584882 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.586662 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.589099 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-dxbts"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.590290 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.591359 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-4vk27"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.592675 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.593838 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-rjsfv"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.595156 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-s295n"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.598553 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.598620 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.599956 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-tjn2b"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.600345 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.601797 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-wq5bz"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.603466 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-wq5bz"] Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.603638 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-wq5bz" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.636768 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.636825 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5mgw\" (UniqueName: \"kubernetes.io/projected/2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2-kube-api-access-v5mgw\") pod \"dns-default-s295n\" (UID: \"2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2\") " pod="openshift-dns/dns-default-s295n" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.636881 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.636989 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2-metrics-tls\") pod \"dns-default-s295n\" (UID: \"2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2\") " pod="openshift-dns/dns-default-s295n" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.637050 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.637086 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.637122 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbmdz\" (UniqueName: \"kubernetes.io/projected/cbcfea3d-b7da-4e43-86cf-7c32185eb863-kube-api-access-kbmdz\") pod \"control-plane-machine-set-operator-78cbb6b69f-4c9wm\" (UID: \"cbcfea3d-b7da-4e43-86cf-7c32185eb863\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4c9wm" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.637169 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b8d34290-e390-4c87-ac72-8f1142bff53c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-sljbl\" (UID: \"b8d34290-e390-4c87-ac72-8f1142bff53c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl" Jan 28 16:37:16 crc 
kubenswrapper[4877]: I0128 16:37:16.637202 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fbd8dc94-00b1-4aff-a395-72702a0db6c1-webhook-cert\") pod \"packageserver-d55dfcdfc-wnxxp\" (UID: \"fbd8dc94-00b1-4aff-a395-72702a0db6c1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.637232 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c8f60588-8859-46a0-94b7-77c176b03cc2-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-fclsq\" (UID: \"c8f60588-8859-46a0-94b7-77c176b03cc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.637353 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8d34290-e390-4c87-ac72-8f1142bff53c-config\") pod \"kube-controller-manager-operator-78b949d7b-sljbl\" (UID: \"b8d34290-e390-4c87-ac72-8f1142bff53c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.637419 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63015241-3e17-41fe-aa5d-1aa0b707970b-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-s4swd\" (UID: \"63015241-3e17-41fe-aa5d-1aa0b707970b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.637455 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.637503 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/fbd8dc94-00b1-4aff-a395-72702a0db6c1-tmpfs\") pod \"packageserver-d55dfcdfc-wnxxp\" (UID: \"fbd8dc94-00b1-4aff-a395-72702a0db6c1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.637639 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/63015241-3e17-41fe-aa5d-1aa0b707970b-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-s4swd\" (UID: \"63015241-3e17-41fe-aa5d-1aa0b707970b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd" Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.637684 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/cbcfea3d-b7da-4e43-86cf-7c32185eb863-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4c9wm\" (UID: \"cbcfea3d-b7da-4e43-86cf-7c32185eb863\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4c9wm" 
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.641582 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63015241-3e17-41fe-aa5d-1aa0b707970b-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-s4swd\" (UID: \"63015241-3e17-41fe-aa5d-1aa0b707970b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.641631 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-audit-policies\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.641680 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cv2pv\" (UniqueName: \"kubernetes.io/projected/fbd8dc94-00b1-4aff-a395-72702a0db6c1-kube-api-access-cv2pv\") pod \"packageserver-d55dfcdfc-wnxxp\" (UID: \"fbd8dc94-00b1-4aff-a395-72702a0db6c1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.641724 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.641767 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1d91c2d-c142-43ae-9563-e614d1c11c82-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-cq794\" (UID: \"c1d91c2d-c142-43ae-9563-e614d1c11c82\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.641825 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7zh2\" (UniqueName: \"kubernetes.io/projected/135ced37-b13a-473b-950f-c1ce9567d15f-kube-api-access-t7zh2\") pod \"migrator-59844c95c7-rjsfv\" (UID: \"135ced37-b13a-473b-950f-c1ce9567d15f\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rjsfv"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.641864 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6749\" (UniqueName: \"kubernetes.io/projected/01fc6775-f774-41c4-872e-dba5e6d80e10-kube-api-access-b6749\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.641893 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1d91c2d-c142-43ae-9563-e614d1c11c82-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-cq794\" (UID: \"c1d91c2d-c142-43ae-9563-e614d1c11c82\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.641952 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8f60588-8859-46a0-94b7-77c176b03cc2-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-fclsq\" (UID: \"c8f60588-8859-46a0-94b7-77c176b03cc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.642076 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/01fc6775-f774-41c4-872e-dba5e6d80e10-audit-dir\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.642143 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.642230 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtl4r\" (UniqueName: \"kubernetes.io/projected/17940ca1-0215-4491-a9f9-9177b04180d5-kube-api-access-vtl4r\") pod \"openshift-config-operator-7777fb866f-pwhsx\" (UID: \"17940ca1-0215-4491-a9f9-9177b04180d5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.642278 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.642320 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b8d34290-e390-4c87-ac72-8f1142bff53c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-sljbl\" (UID: \"b8d34290-e390-4c87-ac72-8f1142bff53c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.642371 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.642409 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/17940ca1-0215-4491-a9f9-9177b04180d5-available-featuregates\") pod \"openshift-config-operator-7777fb866f-pwhsx\" (UID: \"17940ca1-0215-4491-a9f9-9177b04180d5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.642441 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.642520 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2-config-volume\") pod \"dns-default-s295n\" (UID: \"2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2\") " pod="openshift-dns/dns-default-s295n"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.642558 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/17940ca1-0215-4491-a9f9-9177b04180d5-serving-cert\") pod \"openshift-config-operator-7777fb866f-pwhsx\" (UID: \"17940ca1-0215-4491-a9f9-9177b04180d5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.642572 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/fbd8dc94-00b1-4aff-a395-72702a0db6c1-tmpfs\") pod \"packageserver-d55dfcdfc-wnxxp\" (UID: \"fbd8dc94-00b1-4aff-a395-72702a0db6c1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.642591 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krqtv\" (UniqueName: \"kubernetes.io/projected/c8f60588-8859-46a0-94b7-77c176b03cc2-kube-api-access-krqtv\") pod \"kube-storage-version-migrator-operator-b67b599dd-fclsq\" (UID: \"c8f60588-8859-46a0-94b7-77c176b03cc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.642625 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.642658 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1d91c2d-c142-43ae-9563-e614d1c11c82-config\") pod \"kube-apiserver-operator-766d6c64bb-cq794\" (UID: \"c1d91c2d-c142-43ae-9563-e614d1c11c82\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.642692 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fbd8dc94-00b1-4aff-a395-72702a0db6c1-apiservice-cert\") pod \"packageserver-d55dfcdfc-wnxxp\" (UID: \"fbd8dc94-00b1-4aff-a395-72702a0db6c1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.643150 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.644158 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.644216 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.644394 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.645157 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/17940ca1-0215-4491-a9f9-9177b04180d5-available-featuregates\") pod \"openshift-config-operator-7777fb866f-pwhsx\" (UID: \"17940ca1-0215-4491-a9f9-9177b04180d5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.645317 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.645608 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-audit-policies\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.645762 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/01fc6775-f774-41c4-872e-dba5e6d80e10-audit-dir\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.650450 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.652607 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.652902 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/17940ca1-0215-4491-a9f9-9177b04180d5-serving-cert\") pod \"openshift-config-operator-7777fb866f-pwhsx\" (UID: \"17940ca1-0215-4491-a9f9-9177b04180d5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.654305 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.654662 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.654814 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.657535 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.660443 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.680840 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.688910 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c1d91c2d-c142-43ae-9563-e614d1c11c82-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-cq794\" (UID: \"c1d91c2d-c142-43ae-9563-e614d1c11c82\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.699216 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.706323 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1d91c2d-c142-43ae-9563-e614d1c11c82-config\") pod \"kube-apiserver-operator-766d6c64bb-cq794\" (UID: \"c1d91c2d-c142-43ae-9563-e614d1c11c82\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.716802 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.719331 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.739436 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.761282 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.781531 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.801742 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.820446 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.841392 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.852332 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b8d34290-e390-4c87-ac72-8f1142bff53c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-sljbl\" (UID: \"b8d34290-e390-4c87-ac72-8f1142bff53c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.861741 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.879570 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.888858 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8d34290-e390-4c87-ac72-8f1142bff53c-config\") pod \"kube-controller-manager-operator-78b949d7b-sljbl\" (UID: \"b8d34290-e390-4c87-ac72-8f1142bff53c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.900598 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.920819 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.926109 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/cbcfea3d-b7da-4e43-86cf-7c32185eb863-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4c9wm\" (UID: \"cbcfea3d-b7da-4e43-86cf-7c32185eb863\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4c9wm"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.940788 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.961755 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.971942 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c8f60588-8859-46a0-94b7-77c176b03cc2-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-fclsq\" (UID: \"c8f60588-8859-46a0-94b7-77c176b03cc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.980835 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Jan 28 16:37:16 crc kubenswrapper[4877]: I0128 16:37:16.999908 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.007364 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8f60588-8859-46a0-94b7-77c176b03cc2-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-fclsq\" (UID: \"c8f60588-8859-46a0-94b7-77c176b03cc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.021672 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.040438 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.046803 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/63015241-3e17-41fe-aa5d-1aa0b707970b-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-s4swd\" (UID: \"63015241-3e17-41fe-aa5d-1aa0b707970b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.060124 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.066006 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63015241-3e17-41fe-aa5d-1aa0b707970b-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-s4swd\" (UID: \"63015241-3e17-41fe-aa5d-1aa0b707970b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.080291 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.100192 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.109529 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2-config-volume\") pod \"dns-default-s295n\" (UID: \"2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2\") " pod="openshift-dns/dns-default-s295n"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.120383 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.130519 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2-metrics-tls\") pod \"dns-default-s295n\" (UID: \"2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2\") " pod="openshift-dns/dns-default-s295n"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.141044 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.159680 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.180672 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.201681 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.229834 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.240294 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.260576 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.280348 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.300416 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.320795 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.345613 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.359919 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.380406 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.388716 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fbd8dc94-00b1-4aff-a395-72702a0db6c1-apiservice-cert\") pod \"packageserver-d55dfcdfc-wnxxp\" (UID: \"fbd8dc94-00b1-4aff-a395-72702a0db6c1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.392009 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fbd8dc94-00b1-4aff-a395-72702a0db6c1-webhook-cert\") pod \"packageserver-d55dfcdfc-wnxxp\" (UID: \"fbd8dc94-00b1-4aff-a395-72702a0db6c1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.401851 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.420540 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.438551 4877 request.go:700] Waited for 1.003324691s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.461071 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.481579 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.500870 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.520592 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.540651 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.560422 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.581401 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.600253 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.620768 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.641546 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.660409 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.681022 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.699985 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.719910 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.741454 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.761147 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.781671 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.801402 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.819423 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.841297 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.872759 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.880413 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.901402 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.919991 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.942178 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.960274 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Jan 28 16:37:17 crc kubenswrapper[4877]: I0128 16:37:17.980930 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.001128 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.020955 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.041040 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.061462 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.080727 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.132249 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dw7r\" (UniqueName: \"kubernetes.io/projected/e6299f68-b95a-4cf2-b4ea-5ab666b0f21d-kube-api-access-8dw7r\") pod \"apiserver-76f77b778f-2m5lt\" (UID: \"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d\") " pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.138046 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfdmw\" (UniqueName: \"kubernetes.io/projected/7725b803-8d54-401d-bb4c-4112e90ddc0b-kube-api-access-cfdmw\") pod \"console-operator-58897d9998-gb5km\" (UID: \"7725b803-8d54-401d-bb4c-4112e90ddc0b\") " pod="openshift-console-operator/console-operator-58897d9998-gb5km"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.160009 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.171338 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-962qh\" (UniqueName: \"kubernetes.io/projected/eace4e5a-20a2-43f4-8bb7-d5ddb171de98-kube-api-access-962qh\") pod \"machine-approver-56656f9798-pz67p\" (UID: \"eace4e5a-20a2-43f4-8bb7-d5ddb171de98\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.181448 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.224814 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvrf5\" (UniqueName: \"kubernetes.io/projected/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-kube-api-access-bvrf5\") pod \"console-f9d7485db-f5npr\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " pod="openshift-console/console-f9d7485db-f5npr"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.238239 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwgnj\" (UniqueName: \"kubernetes.io/projected/92f14a91-e41e-4b81-bec5-ea6cf4a3037a-kube-api-access-zwgnj\") pod \"authentication-operator-69f744f599-q9rx2\" (UID: \"92f14a91-e41e-4b81-bec5-ea6cf4a3037a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.257247 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.261837 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pssp6\" (UniqueName: \"kubernetes.io/projected/1d38cd56-3dfc-495c-9d3e-23e78467ce65-kube-api-access-pssp6\") pod \"cluster-samples-operator-665b6dd947-8qjwb\" (UID: \"1d38cd56-3dfc-495c-9d3e-23e78467ce65\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qjwb"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.262105 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-f5npr"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.288145 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2dwf\" (UniqueName: \"kubernetes.io/projected/1f30a6e5-e444-46ee-8756-cac33b69c05e-kube-api-access-c2dwf\") pod \"route-controller-manager-6576b87f9c-ck764\" (UID: \"1f30a6e5-e444-46ee-8756-cac33b69c05e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.304207 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sl4g5\" (UniqueName: \"kubernetes.io/projected/fbd014e8-90c4-488c-88fa-b68493bebb36-kube-api-access-sl4g5\") pod \"controller-manager-879f6c89f-xjv5z\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.318695 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkgwb\" (UniqueName: \"kubernetes.io/projected/d1397d20-a909-4e16-a962-f0dad9942a82-kube-api-access-wkgwb\") pod \"cluster-image-registry-operator-dc59b4c8b-7mnfs\" (UID: \"d1397d20-a909-4e16-a962-f0dad9942a82\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs"
Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.340714 4877 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qjwb" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.344595 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cmpd\" (UniqueName: \"kubernetes.io/projected/0c01a743-87ea-48f9-a8bd-69475721c4cc-kube-api-access-9cmpd\") pod \"apiserver-7bbb656c7d-4wpq4\" (UID: \"0c01a743-87ea-48f9-a8bd-69475721c4cc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.365738 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blnnh\" (UniqueName: \"kubernetes.io/projected/d030698d-e4b8-409b-aa4a-63fc20b94771-kube-api-access-blnnh\") pod \"openshift-apiserver-operator-796bbdcf4f-nphcd\" (UID: \"d030698d-e4b8-409b-aa4a-63fc20b94771\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.380491 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.389282 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.397692 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d1397d20-a909-4e16-a962-f0dad9942a82-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-7mnfs\" (UID: \"d1397d20-a909-4e16-a962-f0dad9942a82\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.399277 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.400970 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.409714 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-gb5km" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.418005 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.418356 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.421077 4877 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.428681 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.439663 4877 request.go:700] Waited for 1.85943174s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dmachine-config-server-dockercfg-qx5rd&limit=500&resourceVersion=0 Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.442504 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.444665 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.460939 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.479986 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.491348 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.503998 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.504305 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-f5npr"] Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.521189 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 28 16:37:18 crc kubenswrapper[4877]: W0128 16:37:18.524504 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae95a71e_8f5b_45ac_b6e7_a78e2258de80.slice/crio-14ceb84210e6231dcbef2eb43194699995bec5901d2f29a0976583c0fc6c0d62 WatchSource:0}: Error finding container 14ceb84210e6231dcbef2eb43194699995bec5901d2f29a0976583c0fc6c0d62: Status 404 returned error can't find the container with id 14ceb84210e6231dcbef2eb43194699995bec5901d2f29a0976583c0fc6c0d62 Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.544367 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.560723 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.629156 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5mgw\" (UniqueName: \"kubernetes.io/projected/2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2-kube-api-access-v5mgw\") pod \"dns-default-s295n\" (UID: \"2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2\") " pod="openshift-dns/dns-default-s295n" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.647505 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/b8d34290-e390-4c87-ac72-8f1142bff53c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-sljbl\" (UID: \"b8d34290-e390-4c87-ac72-8f1142bff53c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.683357 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63015241-3e17-41fe-aa5d-1aa0b707970b-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-s4swd\" (UID: \"63015241-3e17-41fe-aa5d-1aa0b707970b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.687437 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbmdz\" (UniqueName: \"kubernetes.io/projected/cbcfea3d-b7da-4e43-86cf-7c32185eb863-kube-api-access-kbmdz\") pod \"control-plane-machine-set-operator-78cbb6b69f-4c9wm\" (UID: \"cbcfea3d-b7da-4e43-86cf-7c32185eb863\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4c9wm" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.712501 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtl4r\" (UniqueName: \"kubernetes.io/projected/17940ca1-0215-4491-a9f9-9177b04180d5-kube-api-access-vtl4r\") pod \"openshift-config-operator-7777fb866f-pwhsx\" (UID: \"17940ca1-0215-4491-a9f9-9177b04180d5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.716085 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krqtv\" (UniqueName: \"kubernetes.io/projected/c8f60588-8859-46a0-94b7-77c176b03cc2-kube-api-access-krqtv\") pod \"kube-storage-version-migrator-operator-b67b599dd-fclsq\" (UID: \"c8f60588-8859-46a0-94b7-77c176b03cc2\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.736966 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6749\" (UniqueName: \"kubernetes.io/projected/01fc6775-f774-41c4-872e-dba5e6d80e10-kube-api-access-b6749\") pod \"oauth-openshift-558db77b4-whfj4\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " pod="openshift-authentication/oauth-openshift-558db77b4-whfj4" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.745444 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qjwb"] Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.759968 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cv2pv\" (UniqueName: \"kubernetes.io/projected/fbd8dc94-00b1-4aff-a395-72702a0db6c1-kube-api-access-cv2pv\") pod \"packageserver-d55dfcdfc-wnxxp\" (UID: \"fbd8dc94-00b1-4aff-a395-72702a0db6c1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.769873 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.780144 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7zh2\" (UniqueName: \"kubernetes.io/projected/135ced37-b13a-473b-950f-c1ce9567d15f-kube-api-access-t7zh2\") pod \"migrator-59844c95c7-rjsfv\" (UID: \"135ced37-b13a-473b-950f-c1ce9567d15f\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rjsfv" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.795402 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-whfj4" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.802161 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c1d91c2d-c142-43ae-9563-e614d1c11c82-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-cq794\" (UID: \"c1d91c2d-c142-43ae-9563-e614d1c11c82\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.821736 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.842035 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.852858 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4c9wm" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.860768 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.867053 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.875155 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-s295n" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889374 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-registry-certificates\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889420 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0e667586-4bc0-4e00-9aec-fac9ad2b49ca-service-ca-bundle\") pod \"router-default-5444994796-vbvr6\" (UID: \"0e667586-4bc0-4e00-9aec-fac9ad2b49ca\") " pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889457 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d4ddad9c-4170-48ae-9922-158233c9bce7-metrics-tls\") pod \"ingress-operator-5b745b69d9-qlrhq\" (UID: \"d4ddad9c-4170-48ae-9922-158233c9bce7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889494 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0e771245-5049-41d1-a51f-f46222cd686b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-mdr8p\" (UID: \"0e771245-5049-41d1-a51f-f46222cd686b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889521 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d4ddad9c-4170-48ae-9922-158233c9bce7-bound-sa-token\") pod \"ingress-operator-5b745b69d9-qlrhq\" (UID: \"d4ddad9c-4170-48ae-9922-158233c9bce7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889548 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64xvf\" (UniqueName: \"kubernetes.io/projected/0e667586-4bc0-4e00-9aec-fac9ad2b49ca-kube-api-access-64xvf\") pod \"router-default-5444994796-vbvr6\" (UID: \"0e667586-4bc0-4e00-9aec-fac9ad2b49ca\") " pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889599 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7kzr\" (UniqueName: \"kubernetes.io/projected/0e771245-5049-41d1-a51f-f46222cd686b-kube-api-access-w7kzr\") pod \"machine-config-controller-84d6567774-mdr8p\" (UID: \"0e771245-5049-41d1-a51f-f46222cd686b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889621 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90e7c41-cead-4a7f-9369-425e27dccc26-config\") pod \"machine-api-operator-5694c8668f-8n6hm\" (UID: \"d90e7c41-cead-4a7f-9369-425e27dccc26\") " 
pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889653 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889681 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0e771245-5049-41d1-a51f-f46222cd686b-proxy-tls\") pod \"machine-config-controller-84d6567774-mdr8p\" (UID: \"0e771245-5049-41d1-a51f-f46222cd686b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889706 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-bound-sa-token\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889723 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/d90e7c41-cead-4a7f-9369-425e27dccc26-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-8n6hm\" (UID: \"d90e7c41-cead-4a7f-9369-425e27dccc26\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889740 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3160a1cd-84f3-4e54-ae03-537f1b441c3a-metrics-tls\") pod \"dns-operator-744455d44c-rjg7v\" (UID: \"3160a1cd-84f3-4e54-ae03-537f1b441c3a\") " pod="openshift-dns-operator/dns-operator-744455d44c-rjg7v" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889755 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889779 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d4ddad9c-4170-48ae-9922-158233c9bce7-trusted-ca\") pod \"ingress-operator-5b745b69d9-qlrhq\" (UID: \"d4ddad9c-4170-48ae-9922-158233c9bce7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889795 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75ea5b20-f63e-438c-bddf-fdbcc5006672-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-zft2b\" (UID: \"75ea5b20-f63e-438c-bddf-fdbcc5006672\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889820 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/0e667586-4bc0-4e00-9aec-fac9ad2b49ca-default-certificate\") pod \"router-default-5444994796-vbvr6\" (UID: \"0e667586-4bc0-4e00-9aec-fac9ad2b49ca\") " pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889859 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9frch\" (UniqueName: \"kubernetes.io/projected/d90e7c41-cead-4a7f-9369-425e27dccc26-kube-api-access-9frch\") pod \"machine-api-operator-5694c8668f-8n6hm\" (UID: \"d90e7c41-cead-4a7f-9369-425e27dccc26\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889891 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdckl\" (UniqueName: \"kubernetes.io/projected/d4ddad9c-4170-48ae-9922-158233c9bce7-kube-api-access-jdckl\") pod \"ingress-operator-5b745b69d9-qlrhq\" (UID: \"d4ddad9c-4170-48ae-9922-158233c9bce7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889911 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/0e667586-4bc0-4e00-9aec-fac9ad2b49ca-stats-auth\") pod \"router-default-5444994796-vbvr6\" (UID: \"0e667586-4bc0-4e00-9aec-fac9ad2b49ca\") " pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889953 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjwtf\" (UniqueName: \"kubernetes.io/projected/75ea5b20-f63e-438c-bddf-fdbcc5006672-kube-api-access-gjwtf\") pod \"openshift-controller-manager-operator-756b6f6bc6-zft2b\" (UID: \"75ea5b20-f63e-438c-bddf-fdbcc5006672\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.889985 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0e667586-4bc0-4e00-9aec-fac9ad2b49ca-metrics-certs\") pod \"router-default-5444994796-vbvr6\" (UID: \"0e667586-4bc0-4e00-9aec-fac9ad2b49ca\") " pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.890021 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.890041 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-trusted-ca\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: 
\"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.890060 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8sg24\" (UniqueName: \"kubernetes.io/projected/3160a1cd-84f3-4e54-ae03-537f1b441c3a-kube-api-access-8sg24\") pod \"dns-operator-744455d44c-rjg7v\" (UID: \"3160a1cd-84f3-4e54-ae03-537f1b441c3a\") " pod="openshift-dns-operator/dns-operator-744455d44c-rjg7v" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.890090 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/75ea5b20-f63e-438c-bddf-fdbcc5006672-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-zft2b\" (UID: \"75ea5b20-f63e-438c-bddf-fdbcc5006672\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.890109 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dd78f\" (UniqueName: \"kubernetes.io/projected/754cf791-541c-4944-bf3e-7ba18f44d8de-kube-api-access-dd78f\") pod \"downloads-7954f5f757-vpzx9\" (UID: \"754cf791-541c-4944-bf3e-7ba18f44d8de\") " pod="openshift-console/downloads-7954f5f757-vpzx9" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.890131 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-registry-tls\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.890157 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d90e7c41-cead-4a7f-9369-425e27dccc26-images\") pod \"machine-api-operator-5694c8668f-8n6hm\" (UID: \"d90e7c41-cead-4a7f-9369-425e27dccc26\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.890176 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2b6n4\" (UniqueName: \"kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-kube-api-access-2b6n4\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:18 crc kubenswrapper[4877]: E0128 16:37:18.892653 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:19.392638261 +0000 UTC m=+142.950965149 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.892917 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rjsfv" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.910061 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.928181 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-gb5km"] Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.931620 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-2m5lt"] Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.992007 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993404 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50a9b1f2-6b58-4cde-90ed-8553152736e0-config\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993446 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993590 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ffd807bc-800b-4108-b911-b8d41b86781d-images\") pod \"machine-config-operator-74547568cd-mvwft\" (UID: \"ffd807bc-800b-4108-b911-b8d41b86781d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993638 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0e771245-5049-41d1-a51f-f46222cd686b-proxy-tls\") pod \"machine-config-controller-84d6567774-mdr8p\" (UID: \"0e771245-5049-41d1-a51f-f46222cd686b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993659 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f-profile-collector-cert\") pod \"catalog-operator-68c6474976-6q4hg\" (UID: \"a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993686 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pb8f6\" (UniqueName: \"kubernetes.io/projected/7a651f66-8ee1-41d0-87a1-241bbedd4be8-kube-api-access-pb8f6\") pod \"multus-admission-controller-857f4d67dd-6c6br\" (UID: \"7a651f66-8ee1-41d0-87a1-241bbedd4be8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6c6br" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993720 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/92988ced-2e30-46f3-926b-9d8c8cd6f953-cert\") pod \"ingress-canary-wq5bz\" (UID: \"92988ced-2e30-46f3-926b-9d8c8cd6f953\") " pod="openshift-ingress-canary/ingress-canary-wq5bz" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993741 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-bound-sa-token\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993758 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/d90e7c41-cead-4a7f-9369-425e27dccc26-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-8n6hm\" (UID: \"d90e7c41-cead-4a7f-9369-425e27dccc26\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993786 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3160a1cd-84f3-4e54-ae03-537f1b441c3a-metrics-tls\") pod \"dns-operator-744455d44c-rjg7v\" (UID: \"3160a1cd-84f3-4e54-ae03-537f1b441c3a\") " pod="openshift-dns-operator/dns-operator-744455d44c-rjg7v" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993803 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993824 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-config-volume\") pod \"collect-profiles-29493630-n5564\" (UID: \"587b4df1-7315-4a7f-b416-d2e3ff99fd0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993847 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpkqf\" (UniqueName: \"kubernetes.io/projected/a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f-kube-api-access-wpkqf\") pod \"catalog-operator-68c6474976-6q4hg\" (UID: 
\"a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993863 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/50a9b1f2-6b58-4cde-90ed-8553152736e0-etcd-service-ca\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993906 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d4ddad9c-4170-48ae-9922-158233c9bce7-trusted-ca\") pod \"ingress-operator-5b745b69d9-qlrhq\" (UID: \"d4ddad9c-4170-48ae-9922-158233c9bce7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993936 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75ea5b20-f63e-438c-bddf-fdbcc5006672-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-zft2b\" (UID: \"75ea5b20-f63e-438c-bddf-fdbcc5006672\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993956 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4ab01b2e-115e-4406-bfdf-9a9c2615ee5e-config\") pod \"service-ca-operator-777779d784-x8q7t\" (UID: \"4ab01b2e-115e-4406-bfdf-9a9c2615ee5e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.993977 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f-srv-cert\") pod \"catalog-operator-68c6474976-6q4hg\" (UID: \"a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.994025 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/0e667586-4bc0-4e00-9aec-fac9ad2b49ca-default-certificate\") pod \"router-default-5444994796-vbvr6\" (UID: \"0e667586-4bc0-4e00-9aec-fac9ad2b49ca\") " pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.994053 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ffd807bc-800b-4108-b911-b8d41b86781d-proxy-tls\") pod \"machine-config-operator-74547568cd-mvwft\" (UID: \"ffd807bc-800b-4108-b911-b8d41b86781d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.994070 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/048e6bc5-dad4-423f-a249-7c4addf02947-csi-data-dir\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:18 crc 
kubenswrapper[4877]: I0128 16:37:18.994112 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9frch\" (UniqueName: \"kubernetes.io/projected/d90e7c41-cead-4a7f-9369-425e27dccc26-kube-api-access-9frch\") pod \"machine-api-operator-5694c8668f-8n6hm\" (UID: \"d90e7c41-cead-4a7f-9369-425e27dccc26\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.994150 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdckl\" (UniqueName: \"kubernetes.io/projected/d4ddad9c-4170-48ae-9922-158233c9bce7-kube-api-access-jdckl\") pod \"ingress-operator-5b745b69d9-qlrhq\" (UID: \"d4ddad9c-4170-48ae-9922-158233c9bce7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.994167 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/0e667586-4bc0-4e00-9aec-fac9ad2b49ca-stats-auth\") pod \"router-default-5444994796-vbvr6\" (UID: \"0e667586-4bc0-4e00-9aec-fac9ad2b49ca\") " pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.994183 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/362f8c1a-4938-4ee4-853b-8f868147d732-profile-collector-cert\") pod \"olm-operator-6b444d44fb-n9qwr\" (UID: \"362f8c1a-4938-4ee4-853b-8f868147d732\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.994227 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smlnq\" (UniqueName: \"kubernetes.io/projected/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-kube-api-access-smlnq\") pod \"collect-profiles-29493630-n5564\" (UID: \"587b4df1-7315-4a7f-b416-d2e3ff99fd0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.994276 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjwtf\" (UniqueName: \"kubernetes.io/projected/75ea5b20-f63e-438c-bddf-fdbcc5006672-kube-api-access-gjwtf\") pod \"openshift-controller-manager-operator-756b6f6bc6-zft2b\" (UID: \"75ea5b20-f63e-438c-bddf-fdbcc5006672\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b" Jan 28 16:37:18 crc kubenswrapper[4877]: E0128 16:37:18.995462 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:19.495427567 +0000 UTC m=+143.053754455 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.996632 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.996646 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0e667586-4bc0-4e00-9aec-fac9ad2b49ca-metrics-certs\") pod \"router-default-5444994796-vbvr6\" (UID: \"0e667586-4bc0-4e00-9aec-fac9ad2b49ca\") " pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.996701 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/048e6bc5-dad4-423f-a249-7c4addf02947-plugins-dir\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.996726 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqvrq\" (UniqueName: \"kubernetes.io/projected/048e6bc5-dad4-423f-a249-7c4addf02947-kube-api-access-qqvrq\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.996772 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kh28t\" (UniqueName: \"kubernetes.io/projected/50a9b1f2-6b58-4cde-90ed-8553152736e0-kube-api-access-kh28t\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.996841 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.996864 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/362f8c1a-4938-4ee4-853b-8f868147d732-srv-cert\") pod \"olm-operator-6b444d44fb-n9qwr\" (UID: \"362f8c1a-4938-4ee4-853b-8f868147d732\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" Jan 28 16:37:18 crc kubenswrapper[4877]: I0128 16:37:18.996896 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-trusted-ca\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:18 crc kubenswrapper[4877]: E0128 16:37:18.998140 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:19.498129009 +0000 UTC m=+143.056455897 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:18.999299 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-trusted-ca\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:18.999921 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8sg24\" (UniqueName: \"kubernetes.io/projected/3160a1cd-84f3-4e54-ae03-537f1b441c3a-kube-api-access-8sg24\") pod \"dns-operator-744455d44c-rjg7v\" (UID: \"3160a1cd-84f3-4e54-ae03-537f1b441c3a\") " pod="openshift-dns-operator/dns-operator-744455d44c-rjg7v" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:18.999975 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4v8n\" (UniqueName: \"kubernetes.io/projected/137c3b13-a778-42d0-be21-e3a2052feeab-kube-api-access-s4v8n\") pod \"machine-config-server-hw4qp\" (UID: \"137c3b13-a778-42d0-be21-e3a2052feeab\") " pod="openshift-machine-config-operator/machine-config-server-hw4qp" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.000819 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-secret-volume\") pod \"collect-profiles-29493630-n5564\" (UID: \"587b4df1-7315-4a7f-b416-d2e3ff99fd0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.000887 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/75ea5b20-f63e-438c-bddf-fdbcc5006672-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-zft2b\" (UID: \"75ea5b20-f63e-438c-bddf-fdbcc5006672\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.000927 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dd78f\" (UniqueName: \"kubernetes.io/projected/754cf791-541c-4944-bf3e-7ba18f44d8de-kube-api-access-dd78f\") pod \"downloads-7954f5f757-vpzx9\" (UID: 
\"754cf791-541c-4944-bf3e-7ba18f44d8de\") " pod="openshift-console/downloads-7954f5f757-vpzx9" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.000949 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kl2fb\" (UniqueName: \"kubernetes.io/projected/4ab01b2e-115e-4406-bfdf-9a9c2615ee5e-kube-api-access-kl2fb\") pod \"service-ca-operator-777779d784-x8q7t\" (UID: \"4ab01b2e-115e-4406-bfdf-9a9c2615ee5e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.011105 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwqkk\" (UniqueName: \"kubernetes.io/projected/362f8c1a-4938-4ee4-853b-8f868147d732-kube-api-access-lwqkk\") pod \"olm-operator-6b444d44fb-n9qwr\" (UID: \"362f8c1a-4938-4ee4-853b-8f868147d732\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.011196 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/50a9b1f2-6b58-4cde-90ed-8553152736e0-etcd-client\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.011220 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4ab01b2e-115e-4406-bfdf-9a9c2615ee5e-serving-cert\") pod \"service-ca-operator-777779d784-x8q7t\" (UID: \"4ab01b2e-115e-4406-bfdf-9a9c2615ee5e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.011252 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-registry-tls\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.011281 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d90e7c41-cead-4a7f-9369-425e27dccc26-images\") pod \"machine-api-operator-5694c8668f-8n6hm\" (UID: \"d90e7c41-cead-4a7f-9369-425e27dccc26\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.011307 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2k8j\" (UniqueName: \"kubernetes.io/projected/6a92f67e-224e-40a8-893d-edbe8dad2036-kube-api-access-v2k8j\") pod \"marketplace-operator-79b997595-4vk27\" (UID: \"6a92f67e-224e-40a8-893d-edbe8dad2036\") " pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.013008 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d4ddad9c-4170-48ae-9922-158233c9bce7-trusted-ca\") pod \"ingress-operator-5b745b69d9-qlrhq\" (UID: \"d4ddad9c-4170-48ae-9922-158233c9bce7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" Jan 28 16:37:19 crc 
kubenswrapper[4877]: I0128 16:37:19.013336 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75ea5b20-f63e-438c-bddf-fdbcc5006672-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-zft2b\" (UID: \"75ea5b20-f63e-438c-bddf-fdbcc5006672\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.017011 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3160a1cd-84f3-4e54-ae03-537f1b441c3a-metrics-tls\") pod \"dns-operator-744455d44c-rjg7v\" (UID: \"3160a1cd-84f3-4e54-ae03-537f1b441c3a\") " pod="openshift-dns-operator/dns-operator-744455d44c-rjg7v" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.017677 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0e667586-4bc0-4e00-9aec-fac9ad2b49ca-metrics-certs\") pod \"router-default-5444994796-vbvr6\" (UID: \"0e667586-4bc0-4e00-9aec-fac9ad2b49ca\") " pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.020216 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/0e667586-4bc0-4e00-9aec-fac9ad2b49ca-default-certificate\") pod \"router-default-5444994796-vbvr6\" (UID: \"0e667586-4bc0-4e00-9aec-fac9ad2b49ca\") " pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.020578 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.021085 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd6xt\" (UniqueName: \"kubernetes.io/projected/a7623e5a-223d-4da1-94fe-d671bfc4cb3d-kube-api-access-sd6xt\") pod \"package-server-manager-789f6589d5-rlzcf\" (UID: \"a7623e5a-223d-4da1-94fe-d671bfc4cb3d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.021552 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6a92f67e-224e-40a8-893d-edbe8dad2036-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-4vk27\" (UID: \"6a92f67e-224e-40a8-893d-edbe8dad2036\") " pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.022142 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d90e7c41-cead-4a7f-9369-425e27dccc26-images\") pod \"machine-api-operator-5694c8668f-8n6hm\" (UID: \"d90e7c41-cead-4a7f-9369-425e27dccc26\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.022303 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2b6n4\" (UniqueName: 
\"kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-kube-api-access-2b6n4\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.022374 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/048e6bc5-dad4-423f-a249-7c4addf02947-registration-dir\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.026317 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/50a9b1f2-6b58-4cde-90ed-8553152736e0-etcd-ca\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.026366 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/1559cae1-e5e3-4f58-9412-d8bd25303099-signing-key\") pod \"service-ca-9c57cc56f-mq2n8\" (UID: \"1559cae1-e5e3-4f58-9412-d8bd25303099\") " pod="openshift-service-ca/service-ca-9c57cc56f-mq2n8" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.026463 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-registry-certificates\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.026503 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/0e667586-4bc0-4e00-9aec-fac9ad2b49ca-stats-auth\") pod \"router-default-5444994796-vbvr6\" (UID: \"0e667586-4bc0-4e00-9aec-fac9ad2b49ca\") " pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.027952 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-registry-certificates\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.028032 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0e667586-4bc0-4e00-9aec-fac9ad2b49ca-service-ca-bundle\") pod \"router-default-5444994796-vbvr6\" (UID: \"0e667586-4bc0-4e00-9aec-fac9ad2b49ca\") " pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.028125 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/048e6bc5-dad4-423f-a249-7c4addf02947-mountpoint-dir\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:19 crc 
kubenswrapper[4877]: I0128 16:37:19.028226 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6a92f67e-224e-40a8-893d-edbe8dad2036-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-4vk27\" (UID: \"6a92f67e-224e-40a8-893d-edbe8dad2036\") " pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.028264 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d4ddad9c-4170-48ae-9922-158233c9bce7-metrics-tls\") pod \"ingress-operator-5b745b69d9-qlrhq\" (UID: \"d4ddad9c-4170-48ae-9922-158233c9bce7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.028300 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ffd807bc-800b-4108-b911-b8d41b86781d-auth-proxy-config\") pod \"machine-config-operator-74547568cd-mvwft\" (UID: \"ffd807bc-800b-4108-b911-b8d41b86781d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.028326 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/048e6bc5-dad4-423f-a249-7c4addf02947-socket-dir\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.029172 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0e667586-4bc0-4e00-9aec-fac9ad2b49ca-service-ca-bundle\") pod \"router-default-5444994796-vbvr6\" (UID: \"0e667586-4bc0-4e00-9aec-fac9ad2b49ca\") " pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.032543 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50a9b1f2-6b58-4cde-90ed-8553152736e0-serving-cert\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.032640 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0e771245-5049-41d1-a51f-f46222cd686b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-mdr8p\" (UID: \"0e771245-5049-41d1-a51f-f46222cd686b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.032899 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwwjk\" (UniqueName: \"kubernetes.io/projected/1559cae1-e5e3-4f58-9412-d8bd25303099-kube-api-access-rwwjk\") pod \"service-ca-9c57cc56f-mq2n8\" (UID: \"1559cae1-e5e3-4f58-9412-d8bd25303099\") " pod="openshift-service-ca/service-ca-9c57cc56f-mq2n8" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.033660 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0e771245-5049-41d1-a51f-f46222cd686b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-mdr8p\" (UID: \"0e771245-5049-41d1-a51f-f46222cd686b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.034670 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d4ddad9c-4170-48ae-9922-158233c9bce7-bound-sa-token\") pod \"ingress-operator-5b745b69d9-qlrhq\" (UID: \"d4ddad9c-4170-48ae-9922-158233c9bce7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.034722 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64xvf\" (UniqueName: \"kubernetes.io/projected/0e667586-4bc0-4e00-9aec-fac9ad2b49ca-kube-api-access-64xvf\") pod \"router-default-5444994796-vbvr6\" (UID: \"0e667586-4bc0-4e00-9aec-fac9ad2b49ca\") " pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.035107 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7a651f66-8ee1-41d0-87a1-241bbedd4be8-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-6c6br\" (UID: \"7a651f66-8ee1-41d0-87a1-241bbedd4be8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6c6br" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.035278 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/1559cae1-e5e3-4f58-9412-d8bd25303099-signing-cabundle\") pod \"service-ca-9c57cc56f-mq2n8\" (UID: \"1559cae1-e5e3-4f58-9412-d8bd25303099\") " pod="openshift-service-ca/service-ca-9c57cc56f-mq2n8" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.035334 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s99pj\" (UniqueName: \"kubernetes.io/projected/92988ced-2e30-46f3-926b-9d8c8cd6f953-kube-api-access-s99pj\") pod \"ingress-canary-wq5bz\" (UID: \"92988ced-2e30-46f3-926b-9d8c8cd6f953\") " pod="openshift-ingress-canary/ingress-canary-wq5bz" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.036583 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6nct\" (UniqueName: \"kubernetes.io/projected/ffd807bc-800b-4108-b911-b8d41b86781d-kube-api-access-h6nct\") pod \"machine-config-operator-74547568cd-mvwft\" (UID: \"ffd807bc-800b-4108-b911-b8d41b86781d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.036606 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/137c3b13-a778-42d0-be21-e3a2052feeab-node-bootstrap-token\") pod \"machine-config-server-hw4qp\" (UID: \"137c3b13-a778-42d0-be21-e3a2052feeab\") " pod="openshift-machine-config-operator/machine-config-server-hw4qp" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.036663 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7kzr\" (UniqueName: 
\"kubernetes.io/projected/0e771245-5049-41d1-a51f-f46222cd686b-kube-api-access-w7kzr\") pod \"machine-config-controller-84d6567774-mdr8p\" (UID: \"0e771245-5049-41d1-a51f-f46222cd686b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.036694 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/137c3b13-a778-42d0-be21-e3a2052feeab-certs\") pod \"machine-config-server-hw4qp\" (UID: \"137c3b13-a778-42d0-be21-e3a2052feeab\") " pod="openshift-machine-config-operator/machine-config-server-hw4qp" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.036714 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a7623e5a-223d-4da1-94fe-d671bfc4cb3d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-rlzcf\" (UID: \"a7623e5a-223d-4da1-94fe-d671bfc4cb3d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.037951 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90e7c41-cead-4a7f-9369-425e27dccc26-config\") pod \"machine-api-operator-5694c8668f-8n6hm\" (UID: \"d90e7c41-cead-4a7f-9369-425e27dccc26\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.038864 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/75ea5b20-f63e-438c-bddf-fdbcc5006672-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-zft2b\" (UID: \"75ea5b20-f63e-438c-bddf-fdbcc5006672\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.039435 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-registry-tls\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.041508 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0e771245-5049-41d1-a51f-f46222cd686b-proxy-tls\") pod \"machine-config-controller-84d6567774-mdr8p\" (UID: \"0e771245-5049-41d1-a51f-f46222cd686b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.042289 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/d90e7c41-cead-4a7f-9369-425e27dccc26-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-8n6hm\" (UID: \"d90e7c41-cead-4a7f-9369-425e27dccc26\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.042358 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90e7c41-cead-4a7f-9369-425e27dccc26-config\") pod 
\"machine-api-operator-5694c8668f-8n6hm\" (UID: \"d90e7c41-cead-4a7f-9369-425e27dccc26\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.050533 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjwtf\" (UniqueName: \"kubernetes.io/projected/75ea5b20-f63e-438c-bddf-fdbcc5006672-kube-api-access-gjwtf\") pod \"openshift-controller-manager-operator-756b6f6bc6-zft2b\" (UID: \"75ea5b20-f63e-438c-bddf-fdbcc5006672\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.062412 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-bound-sa-token\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.074418 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d4ddad9c-4170-48ae-9922-158233c9bce7-metrics-tls\") pod \"ingress-operator-5b745b69d9-qlrhq\" (UID: \"d4ddad9c-4170-48ae-9922-158233c9bce7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.076156 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764"] Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.082077 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.087449 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8sg24\" (UniqueName: \"kubernetes.io/projected/3160a1cd-84f3-4e54-ae03-537f1b441c3a-kube-api-access-8sg24\") pod \"dns-operator-744455d44c-rjg7v\" (UID: \"3160a1cd-84f3-4e54-ae03-537f1b441c3a\") " pod="openshift-dns-operator/dns-operator-744455d44c-rjg7v" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.089059 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs"] Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.100061 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdckl\" (UniqueName: \"kubernetes.io/projected/d4ddad9c-4170-48ae-9922-158233c9bce7-kube-api-access-jdckl\") pod \"ingress-operator-5b745b69d9-qlrhq\" (UID: \"d4ddad9c-4170-48ae-9922-158233c9bce7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.108021 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"] Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.129052 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-whfj4"] Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.131534 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-rjg7v" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.134939 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9frch\" (UniqueName: \"kubernetes.io/projected/d90e7c41-cead-4a7f-9369-425e27dccc26-kube-api-access-9frch\") pod \"machine-api-operator-5694c8668f-8n6hm\" (UID: \"d90e7c41-cead-4a7f-9369-425e27dccc26\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.137264 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dd78f\" (UniqueName: \"kubernetes.io/projected/754cf791-541c-4944-bf3e-7ba18f44d8de-kube-api-access-dd78f\") pod \"downloads-7954f5f757-vpzx9\" (UID: \"754cf791-541c-4944-bf3e-7ba18f44d8de\") " pod="openshift-console/downloads-7954f5f757-vpzx9" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.139040 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:19 crc kubenswrapper[4877]: E0128 16:37:19.139831 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:19.639799152 +0000 UTC m=+143.198126050 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.142002 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/362f8c1a-4938-4ee4-853b-8f868147d732-profile-collector-cert\") pod \"olm-operator-6b444d44fb-n9qwr\" (UID: \"362f8c1a-4938-4ee4-853b-8f868147d732\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.142123 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smlnq\" (UniqueName: \"kubernetes.io/projected/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-kube-api-access-smlnq\") pod \"collect-profiles-29493630-n5564\" (UID: \"587b4df1-7315-4a7f-b416-d2e3ff99fd0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.142191 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kh28t\" (UniqueName: \"kubernetes.io/projected/50a9b1f2-6b58-4cde-90ed-8553152736e0-kube-api-access-kh28t\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.142214 4877 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/048e6bc5-dad4-423f-a249-7c4addf02947-plugins-dir\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.142236 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqvrq\" (UniqueName: \"kubernetes.io/projected/048e6bc5-dad4-423f-a249-7c4addf02947-kube-api-access-qqvrq\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.142278 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.142302 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/362f8c1a-4938-4ee4-853b-8f868147d732-srv-cert\") pod \"olm-operator-6b444d44fb-n9qwr\" (UID: \"362f8c1a-4938-4ee4-853b-8f868147d732\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.142344 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4v8n\" (UniqueName: \"kubernetes.io/projected/137c3b13-a778-42d0-be21-e3a2052feeab-kube-api-access-s4v8n\") pod \"machine-config-server-hw4qp\" (UID: \"137c3b13-a778-42d0-be21-e3a2052feeab\") " pod="openshift-machine-config-operator/machine-config-server-hw4qp" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.142376 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-secret-volume\") pod \"collect-profiles-29493630-n5564\" (UID: \"587b4df1-7315-4a7f-b416-d2e3ff99fd0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.142886 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kl2fb\" (UniqueName: \"kubernetes.io/projected/4ab01b2e-115e-4406-bfdf-9a9c2615ee5e-kube-api-access-kl2fb\") pod \"service-ca-operator-777779d784-x8q7t\" (UID: \"4ab01b2e-115e-4406-bfdf-9a9c2615ee5e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.142919 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwqkk\" (UniqueName: \"kubernetes.io/projected/362f8c1a-4938-4ee4-853b-8f868147d732-kube-api-access-lwqkk\") pod \"olm-operator-6b444d44fb-n9qwr\" (UID: \"362f8c1a-4938-4ee4-853b-8f868147d732\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.142970 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/50a9b1f2-6b58-4cde-90ed-8553152736e0-etcd-client\") pod \"etcd-operator-b45778765-dxbts\" 
(UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.142995 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4ab01b2e-115e-4406-bfdf-9a9c2615ee5e-serving-cert\") pod \"service-ca-operator-777779d784-x8q7t\" (UID: \"4ab01b2e-115e-4406-bfdf-9a9c2615ee5e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143008 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/048e6bc5-dad4-423f-a249-7c4addf02947-plugins-dir\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143028 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2k8j\" (UniqueName: \"kubernetes.io/projected/6a92f67e-224e-40a8-893d-edbe8dad2036-kube-api-access-v2k8j\") pod \"marketplace-operator-79b997595-4vk27\" (UID: \"6a92f67e-224e-40a8-893d-edbe8dad2036\") " pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143097 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sd6xt\" (UniqueName: \"kubernetes.io/projected/a7623e5a-223d-4da1-94fe-d671bfc4cb3d-kube-api-access-sd6xt\") pod \"package-server-manager-789f6589d5-rlzcf\" (UID: \"a7623e5a-223d-4da1-94fe-d671bfc4cb3d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143172 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6a92f67e-224e-40a8-893d-edbe8dad2036-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-4vk27\" (UID: \"6a92f67e-224e-40a8-893d-edbe8dad2036\") " pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143213 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/048e6bc5-dad4-423f-a249-7c4addf02947-registration-dir\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143244 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/50a9b1f2-6b58-4cde-90ed-8553152736e0-etcd-ca\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143265 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/1559cae1-e5e3-4f58-9412-d8bd25303099-signing-key\") pod \"service-ca-9c57cc56f-mq2n8\" (UID: \"1559cae1-e5e3-4f58-9412-d8bd25303099\") " pod="openshift-service-ca/service-ca-9c57cc56f-mq2n8" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143304 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/048e6bc5-dad4-423f-a249-7c4addf02947-mountpoint-dir\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143329 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6a92f67e-224e-40a8-893d-edbe8dad2036-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-4vk27\" (UID: \"6a92f67e-224e-40a8-893d-edbe8dad2036\") " pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143377 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ffd807bc-800b-4108-b911-b8d41b86781d-auth-proxy-config\") pod \"machine-config-operator-74547568cd-mvwft\" (UID: \"ffd807bc-800b-4108-b911-b8d41b86781d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143399 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/048e6bc5-dad4-423f-a249-7c4addf02947-socket-dir\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143430 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50a9b1f2-6b58-4cde-90ed-8553152736e0-serving-cert\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143565 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwwjk\" (UniqueName: \"kubernetes.io/projected/1559cae1-e5e3-4f58-9412-d8bd25303099-kube-api-access-rwwjk\") pod \"service-ca-9c57cc56f-mq2n8\" (UID: \"1559cae1-e5e3-4f58-9412-d8bd25303099\") " pod="openshift-service-ca/service-ca-9c57cc56f-mq2n8" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143604 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7a651f66-8ee1-41d0-87a1-241bbedd4be8-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-6c6br\" (UID: \"7a651f66-8ee1-41d0-87a1-241bbedd4be8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6c6br" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143638 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/1559cae1-e5e3-4f58-9412-d8bd25303099-signing-cabundle\") pod \"service-ca-9c57cc56f-mq2n8\" (UID: \"1559cae1-e5e3-4f58-9412-d8bd25303099\") " pod="openshift-service-ca/service-ca-9c57cc56f-mq2n8" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143667 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6nct\" (UniqueName: \"kubernetes.io/projected/ffd807bc-800b-4108-b911-b8d41b86781d-kube-api-access-h6nct\") pod \"machine-config-operator-74547568cd-mvwft\" (UID: \"ffd807bc-800b-4108-b911-b8d41b86781d\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143689 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s99pj\" (UniqueName: \"kubernetes.io/projected/92988ced-2e30-46f3-926b-9d8c8cd6f953-kube-api-access-s99pj\") pod \"ingress-canary-wq5bz\" (UID: \"92988ced-2e30-46f3-926b-9d8c8cd6f953\") " pod="openshift-ingress-canary/ingress-canary-wq5bz" Jan 28 16:37:19 crc kubenswrapper[4877]: E0128 16:37:19.143721 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:19.643702045 +0000 UTC m=+143.202028933 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143750 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/137c3b13-a778-42d0-be21-e3a2052feeab-node-bootstrap-token\") pod \"machine-config-server-hw4qp\" (UID: \"137c3b13-a778-42d0-be21-e3a2052feeab\") " pod="openshift-machine-config-operator/machine-config-server-hw4qp" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143803 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/137c3b13-a778-42d0-be21-e3a2052feeab-certs\") pod \"machine-config-server-hw4qp\" (UID: \"137c3b13-a778-42d0-be21-e3a2052feeab\") " pod="openshift-machine-config-operator/machine-config-server-hw4qp" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143826 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a7623e5a-223d-4da1-94fe-d671bfc4cb3d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-rlzcf\" (UID: \"a7623e5a-223d-4da1-94fe-d671bfc4cb3d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143851 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50a9b1f2-6b58-4cde-90ed-8553152736e0-config\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143884 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ffd807bc-800b-4108-b911-b8d41b86781d-images\") pod \"machine-config-operator-74547568cd-mvwft\" (UID: \"ffd807bc-800b-4108-b911-b8d41b86781d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.143909 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f-profile-collector-cert\") pod \"catalog-operator-68c6474976-6q4hg\" (UID: \"a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.144067 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pb8f6\" (UniqueName: \"kubernetes.io/projected/7a651f66-8ee1-41d0-87a1-241bbedd4be8-kube-api-access-pb8f6\") pod \"multus-admission-controller-857f4d67dd-6c6br\" (UID: \"7a651f66-8ee1-41d0-87a1-241bbedd4be8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6c6br" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.144099 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/92988ced-2e30-46f3-926b-9d8c8cd6f953-cert\") pod \"ingress-canary-wq5bz\" (UID: \"92988ced-2e30-46f3-926b-9d8c8cd6f953\") " pod="openshift-ingress-canary/ingress-canary-wq5bz" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.144161 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-config-volume\") pod \"collect-profiles-29493630-n5564\" (UID: \"587b4df1-7315-4a7f-b416-d2e3ff99fd0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.144192 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpkqf\" (UniqueName: \"kubernetes.io/projected/a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f-kube-api-access-wpkqf\") pod \"catalog-operator-68c6474976-6q4hg\" (UID: \"a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.144232 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/50a9b1f2-6b58-4cde-90ed-8553152736e0-etcd-service-ca\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.144270 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4ab01b2e-115e-4406-bfdf-9a9c2615ee5e-config\") pod \"service-ca-operator-777779d784-x8q7t\" (UID: \"4ab01b2e-115e-4406-bfdf-9a9c2615ee5e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.144290 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f-srv-cert\") pod \"catalog-operator-68c6474976-6q4hg\" (UID: \"a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.144311 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ffd807bc-800b-4108-b911-b8d41b86781d-proxy-tls\") pod \"machine-config-operator-74547568cd-mvwft\" (UID: \"ffd807bc-800b-4108-b911-b8d41b86781d\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.144338 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/048e6bc5-dad4-423f-a249-7c4addf02947-csi-data-dir\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.144752 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/048e6bc5-dad4-423f-a249-7c4addf02947-csi-data-dir\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.144888 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ffd807bc-800b-4108-b911-b8d41b86781d-auth-proxy-config\") pod \"machine-config-operator-74547568cd-mvwft\" (UID: \"ffd807bc-800b-4108-b911-b8d41b86781d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.144972 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/048e6bc5-dad4-423f-a249-7c4addf02947-mountpoint-dir\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.145552 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/048e6bc5-dad4-423f-a249-7c4addf02947-socket-dir\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.146385 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/50a9b1f2-6b58-4cde-90ed-8553152736e0-etcd-ca\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.146817 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/1559cae1-e5e3-4f58-9412-d8bd25303099-signing-cabundle\") pod \"service-ca-9c57cc56f-mq2n8\" (UID: \"1559cae1-e5e3-4f58-9412-d8bd25303099\") " pod="openshift-service-ca/service-ca-9c57cc56f-mq2n8" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.146850 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/048e6bc5-dad4-423f-a249-7c4addf02947-registration-dir\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.149497 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7a651f66-8ee1-41d0-87a1-241bbedd4be8-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-6c6br\" (UID: \"7a651f66-8ee1-41d0-87a1-241bbedd4be8\") " 
pod="openshift-multus/multus-admission-controller-857f4d67dd-6c6br" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.150377 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/362f8c1a-4938-4ee4-853b-8f868147d732-profile-collector-cert\") pod \"olm-operator-6b444d44fb-n9qwr\" (UID: \"362f8c1a-4938-4ee4-853b-8f868147d732\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.155049 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50a9b1f2-6b58-4cde-90ed-8553152736e0-serving-cert\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.155166 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-config-volume\") pod \"collect-profiles-29493630-n5564\" (UID: \"587b4df1-7315-4a7f-b416-d2e3ff99fd0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.155594 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4ab01b2e-115e-4406-bfdf-9a9c2615ee5e-config\") pod \"service-ca-operator-777779d784-x8q7t\" (UID: \"4ab01b2e-115e-4406-bfdf-9a9c2615ee5e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.157987 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/1559cae1-e5e3-4f58-9412-d8bd25303099-signing-key\") pod \"service-ca-9c57cc56f-mq2n8\" (UID: \"1559cae1-e5e3-4f58-9412-d8bd25303099\") " pod="openshift-service-ca/service-ca-9c57cc56f-mq2n8" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.158178 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6a92f67e-224e-40a8-893d-edbe8dad2036-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-4vk27\" (UID: \"6a92f67e-224e-40a8-893d-edbe8dad2036\") " pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.159947 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/92988ced-2e30-46f3-926b-9d8c8cd6f953-cert\") pod \"ingress-canary-wq5bz\" (UID: \"92988ced-2e30-46f3-926b-9d8c8cd6f953\") " pod="openshift-ingress-canary/ingress-canary-wq5bz" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.160809 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ffd807bc-800b-4108-b911-b8d41b86781d-images\") pod \"machine-config-operator-74547568cd-mvwft\" (UID: \"ffd807bc-800b-4108-b911-b8d41b86781d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.161493 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6a92f67e-224e-40a8-893d-edbe8dad2036-marketplace-operator-metrics\") pod 
\"marketplace-operator-79b997595-4vk27\" (UID: \"6a92f67e-224e-40a8-893d-edbe8dad2036\") " pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.162369 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/362f8c1a-4938-4ee4-853b-8f868147d732-srv-cert\") pod \"olm-operator-6b444d44fb-n9qwr\" (UID: \"362f8c1a-4938-4ee4-853b-8f868147d732\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.163108 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/137c3b13-a778-42d0-be21-e3a2052feeab-certs\") pod \"machine-config-server-hw4qp\" (UID: \"137c3b13-a778-42d0-be21-e3a2052feeab\") " pod="openshift-machine-config-operator/machine-config-server-hw4qp" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.165334 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f-profile-collector-cert\") pod \"catalog-operator-68c6474976-6q4hg\" (UID: \"a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.165453 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-secret-volume\") pod \"collect-profiles-29493630-n5564\" (UID: \"587b4df1-7315-4a7f-b416-d2e3ff99fd0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.168491 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ffd807bc-800b-4108-b911-b8d41b86781d-proxy-tls\") pod \"machine-config-operator-74547568cd-mvwft\" (UID: \"ffd807bc-800b-4108-b911-b8d41b86781d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.168847 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4ab01b2e-115e-4406-bfdf-9a9c2615ee5e-serving-cert\") pod \"service-ca-operator-777779d784-x8q7t\" (UID: \"4ab01b2e-115e-4406-bfdf-9a9c2615ee5e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.171104 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/50a9b1f2-6b58-4cde-90ed-8553152736e0-etcd-client\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.173724 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f-srv-cert\") pod \"catalog-operator-68c6474976-6q4hg\" (UID: \"a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.179787 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" 
(UniqueName: \"kubernetes.io/secret/137c3b13-a778-42d0-be21-e3a2052feeab-node-bootstrap-token\") pod \"machine-config-server-hw4qp\" (UID: \"137c3b13-a778-42d0-be21-e3a2052feeab\") " pod="openshift-machine-config-operator/machine-config-server-hw4qp" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.180261 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/50a9b1f2-6b58-4cde-90ed-8553152736e0-etcd-service-ca\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.181055 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a7623e5a-223d-4da1-94fe-d671bfc4cb3d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-rlzcf\" (UID: \"a7623e5a-223d-4da1-94fe-d671bfc4cb3d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.182563 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xjv5z"] Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.185504 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50a9b1f2-6b58-4cde-90ed-8553152736e0-config\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.191504 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd"] Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.192235 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2b6n4\" (UniqueName: \"kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-kube-api-access-2b6n4\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:19 crc kubenswrapper[4877]: W0128 16:37:19.194100 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1397d20_a909_4e16_a962_f0dad9942a82.slice/crio-55dc8ca474509990047cf3d40140a4dedb42f02e27a11de907e681797020f71d WatchSource:0}: Error finding container 55dc8ca474509990047cf3d40140a4dedb42f02e27a11de907e681797020f71d: Status 404 returned error can't find the container with id 55dc8ca474509990047cf3d40140a4dedb42f02e27a11de907e681797020f71d Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.198532 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64xvf\" (UniqueName: \"kubernetes.io/projected/0e667586-4bc0-4e00-9aec-fac9ad2b49ca-kube-api-access-64xvf\") pod \"router-default-5444994796-vbvr6\" (UID: \"0e667586-4bc0-4e00-9aec-fac9ad2b49ca\") " pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.206784 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p" 
event={"ID":"eace4e5a-20a2-43f4-8bb7-d5ddb171de98","Type":"ContainerStarted","Data":"6033e00e0d50e90b5fd24e29f31fad47dd05788a1297fddf07db1c839bd73b5f"} Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.206834 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p" event={"ID":"eace4e5a-20a2-43f4-8bb7-d5ddb171de98","Type":"ContainerStarted","Data":"c7baf7351caa1bdfc3c05573141e71507e9568e00f7153f269494b67d4e9517f"} Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.216592 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-q9rx2"] Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.219958 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" event={"ID":"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d","Type":"ContainerStarted","Data":"8afe7d9059c82d220e5a0bd1f812a025802234dff82f98e0fd4f098adf6ef13e"} Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.221981 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-gb5km" event={"ID":"7725b803-8d54-401d-bb4c-4112e90ddc0b","Type":"ContainerStarted","Data":"41b4dcc69e90ca8e4b6671bbff1978e9dd2e59b0dc395403d057d65b5afd88c1"} Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.226054 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qjwb" event={"ID":"1d38cd56-3dfc-495c-9d3e-23e78467ce65","Type":"ContainerStarted","Data":"69aac026b09f8b456adc2dc0cce5c6c7672eee680939609cc084ce37dea32428"} Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.228413 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-f5npr" event={"ID":"ae95a71e-8f5b-45ac-b6e7-a78e2258de80","Type":"ContainerStarted","Data":"c3b25d06ca0cfccef8f144d713dae6bbf2af79aa1609a28c393cfae9cc22dceb"} Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.228441 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-f5npr" event={"ID":"ae95a71e-8f5b-45ac-b6e7-a78e2258de80","Type":"ContainerStarted","Data":"14ceb84210e6231dcbef2eb43194699995bec5901d2f29a0976583c0fc6c0d62"} Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.232735 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d4ddad9c-4170-48ae-9922-158233c9bce7-bound-sa-token\") pod \"ingress-operator-5b745b69d9-qlrhq\" (UID: \"d4ddad9c-4170-48ae-9922-158233c9bce7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.238055 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7kzr\" (UniqueName: \"kubernetes.io/projected/0e771245-5049-41d1-a51f-f46222cd686b-kube-api-access-w7kzr\") pod \"machine-config-controller-84d6567774-mdr8p\" (UID: \"0e771245-5049-41d1-a51f-f46222cd686b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.245205 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:19 crc kubenswrapper[4877]: E0128 16:37:19.245400 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:19.74536166 +0000 UTC m=+143.303688548 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.245679 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:19 crc kubenswrapper[4877]: E0128 16:37:19.246293 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:19.746286455 +0000 UTC m=+143.304613343 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.264601 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2k8j\" (UniqueName: \"kubernetes.io/projected/6a92f67e-224e-40a8-893d-edbe8dad2036-kube-api-access-v2k8j\") pod \"marketplace-operator-79b997595-4vk27\" (UID: \"6a92f67e-224e-40a8-893d-edbe8dad2036\") " pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.274034 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.292343 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqvrq\" (UniqueName: \"kubernetes.io/projected/048e6bc5-dad4-423f-a249-7c4addf02947-kube-api-access-qqvrq\") pod \"csi-hostpathplugin-tjn2b\" (UID: \"048e6bc5-dad4-423f-a249-7c4addf02947\") " pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.296899 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx"] Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.303216 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kl2fb\" (UniqueName: \"kubernetes.io/projected/4ab01b2e-115e-4406-bfdf-9a9c2615ee5e-kube-api-access-kl2fb\") pod \"service-ca-operator-777779d784-x8q7t\" (UID: \"4ab01b2e-115e-4406-bfdf-9a9c2615ee5e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.315109 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4v8n\" (UniqueName: \"kubernetes.io/projected/137c3b13-a778-42d0-be21-e3a2052feeab-kube-api-access-s4v8n\") pod \"machine-config-server-hw4qp\" (UID: \"137c3b13-a778-42d0-be21-e3a2052feeab\") " pod="openshift-machine-config-operator/machine-config-server-hw4qp" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.317143 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.327917 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-hw4qp" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.342253 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s99pj\" (UniqueName: \"kubernetes.io/projected/92988ced-2e30-46f3-926b-9d8c8cd6f953-kube-api-access-s99pj\") pod \"ingress-canary-wq5bz\" (UID: \"92988ced-2e30-46f3-926b-9d8c8cd6f953\") " pod="openshift-ingress-canary/ingress-canary-wq5bz" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.342871 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-vpzx9" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.350432 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:19 crc kubenswrapper[4877]: E0128 16:37:19.351939 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:19.851897424 +0000 UTC m=+143.410224312 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.376898 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smlnq\" (UniqueName: \"kubernetes.io/projected/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-kube-api-access-smlnq\") pod \"collect-profiles-29493630-n5564\" (UID: \"587b4df1-7315-4a7f-b416-d2e3ff99fd0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.387906 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kh28t\" (UniqueName: \"kubernetes.io/projected/50a9b1f2-6b58-4cde-90ed-8553152736e0-kube-api-access-kh28t\") pod \"etcd-operator-b45778765-dxbts\" (UID: \"50a9b1f2-6b58-4cde-90ed-8553152736e0\") " pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.390229 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.395860 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794"] Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.397735 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq"] Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.406035 4877 util.go:30] "No sandbox for pod can be found. 
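[Editor's note: when an operation like the UnmountVolume above fails, kubelet parks it and logs "No retries permitted until <deadline> (durationBeforeRetry 500ms)"; 500ms is the initial backoff, and the delay grows on repeated failures of the same operation. Below is a hedged Go sketch of that gating, loosely modeled on kubelet's exponential-backoff bookkeeping rather than copied from it.]

    package main

    import (
        "fmt"
        "time"
    )

    // expBackoff is an illustrative version of the per-operation backoff
    // state behind the nestedpendingoperations log lines above.
    type expBackoff struct {
        lastError time.Time
        duration  time.Duration
    }

    // update records a failure: start at 500ms (the value in the log) and
    // double on each subsequent failure, up to maxDuration.
    func (b *expBackoff) update(maxDuration time.Duration) {
        if b.duration == 0 {
            b.duration = 500 * time.Millisecond
        } else {
            b.duration *= 2
            if b.duration > maxDuration {
                b.duration = maxDuration
            }
        }
        b.lastError = time.Now()
    }

    // safeToRetry refuses the retry while now is before lastError+duration,
    // producing a message shaped like the log's.
    func (b *expBackoff) safeToRetry(now time.Time) error {
        deadline := b.lastError.Add(b.duration)
        if now.Before(deadline) {
            return fmt.Errorf("no retries permitted until %v (durationBeforeRetry %v)", deadline, b.duration)
        }
        return nil
    }

    func main() {
        var b expBackoff
        b.update(2 * time.Minute)
        if err := b.safeToRetry(time.Now()); err != nil {
            fmt.Println(err)
        }
    }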
Need to start a new one" pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.409449 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd6xt\" (UniqueName: \"kubernetes.io/projected/a7623e5a-223d-4da1-94fe-d671bfc4cb3d-kube-api-access-sd6xt\") pod \"package-server-manager-789f6589d5-rlzcf\" (UID: \"a7623e5a-223d-4da1-94fe-d671bfc4cb3d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.423311 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwqkk\" (UniqueName: \"kubernetes.io/projected/362f8c1a-4938-4ee4-853b-8f868147d732-kube-api-access-lwqkk\") pod \"olm-operator-6b444d44fb-n9qwr\" (UID: \"362f8c1a-4938-4ee4-853b-8f868147d732\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.452628 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:19 crc kubenswrapper[4877]: E0128 16:37:19.453059 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:19.953045705 +0000 UTC m=+143.511372593 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.454625 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwwjk\" (UniqueName: \"kubernetes.io/projected/1559cae1-e5e3-4f58-9412-d8bd25303099-kube-api-access-rwwjk\") pod \"service-ca-9c57cc56f-mq2n8\" (UID: \"1559cae1-e5e3-4f58-9412-d8bd25303099\") " pod="openshift-service-ca/service-ca-9c57cc56f-mq2n8" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.458506 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpkqf\" (UniqueName: \"kubernetes.io/projected/a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f-kube-api-access-wpkqf\") pod \"catalog-operator-68c6474976-6q4hg\" (UID: \"a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" Jan 28 16:37:19 crc kubenswrapper[4877]: W0128 16:37:19.471747 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc8f60588_8859_46a0_94b7_77c176b03cc2.slice/crio-e596e184d0537d319a97c245e61ab2e7b76289973f05cb307be5a917ec120e67 WatchSource:0}: Error finding container e596e184d0537d319a97c245e61ab2e7b76289973f05cb307be5a917ec120e67: Status 404 returned error can't find the container with id 
e596e184d0537d319a97c245e61ab2e7b76289973f05cb307be5a917ec120e67 Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.478270 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pb8f6\" (UniqueName: \"kubernetes.io/projected/7a651f66-8ee1-41d0-87a1-241bbedd4be8-kube-api-access-pb8f6\") pod \"multus-admission-controller-857f4d67dd-6c6br\" (UID: \"7a651f66-8ee1-41d0-87a1-241bbedd4be8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6c6br" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.486455 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.501817 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.506019 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6nct\" (UniqueName: \"kubernetes.io/projected/ffd807bc-800b-4108-b911-b8d41b86781d-kube-api-access-h6nct\") pod \"machine-config-operator-74547568cd-mvwft\" (UID: \"ffd807bc-800b-4108-b911-b8d41b86781d\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.519723 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.530264 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.536999 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.542709 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-6c6br" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.549837 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-rjsfv"] Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.558076 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl"] Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.560328 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.562798 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.562906 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:19 crc kubenswrapper[4877]: E0128 16:37:19.570001 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:20.069945577 +0000 UTC m=+143.628272475 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.582611 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.588940 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.591750 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-mq2n8" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.612447 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4c9wm"] Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.615466 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd"] Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.636710 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-wq5bz" Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.660108 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-rjg7v"] Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.664425 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:19 crc kubenswrapper[4877]: E0128 16:37:19.664986 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:20.164971894 +0000 UTC m=+143.723298782 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:19 crc kubenswrapper[4877]: W0128 16:37:19.693332 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb8d34290_e390_4c87_ac72_8f1142bff53c.slice/crio-1f4c34b8dc19bf6efdf88c0863633233445f0ea4436715d5371c8f80cec98978 WatchSource:0}: Error finding container 1f4c34b8dc19bf6efdf88c0863633233445f0ea4436715d5371c8f80cec98978: Status 404 returned error can't find the container with id 1f4c34b8dc19bf6efdf88c0863633233445f0ea4436715d5371c8f80cec98978 Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.696403 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-s295n"] Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.714693 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp"] Jan 28 16:37:19 crc kubenswrapper[4877]: W0128 16:37:19.748562 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcbcfea3d_b7da_4e43_86cf_7c32185eb863.slice/crio-63044d9cbbb0fba5e5bd5d959c422a16b02ef098b58c024e6684a7824e0c4b55 WatchSource:0}: Error finding container 63044d9cbbb0fba5e5bd5d959c422a16b02ef098b58c024e6684a7824e0c4b55: Status 404 returned error can't find the container with id 63044d9cbbb0fba5e5bd5d959c422a16b02ef098b58c024e6684a7824e0c4b55 Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.769410 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:19 crc kubenswrapper[4877]: E0128 16:37:19.769760 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:20.269740582 +0000 UTC m=+143.828067470 (durationBeforeRetry 500ms). 
Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.785150 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b"]
Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.859872 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-vpzx9"]
Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.871798 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:19 crc kubenswrapper[4877]: E0128 16:37:19.872201 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:20.372188267 +0000 UTC m=+143.930515145 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.897539 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-f5npr" podStartSLOduration=118.897487723 podStartE2EDuration="1m58.897487723s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:19.896116526 +0000 UTC m=+143.454443414" watchObservedRunningTime="2026-01-28 16:37:19.897487723 +0000 UTC m=+143.455814611"
Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.961210 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-tjn2b"]
Jan 28 16:37:19 crc kubenswrapper[4877]: I0128 16:37:19.973166 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:19 crc kubenswrapper[4877]: E0128 16:37:19.973813 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:20.4737868 +0000 UTC m=+144.032113688 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:19 crc kubenswrapper[4877]: W0128 16:37:19.980870 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod754cf791_541c_4944_bf3e_7ba18f44d8de.slice/crio-d77fd0aeb84f6da69c57ee7822ce1a80ead5d9cb6792f36313ec326ed3121855 WatchSource:0}: Error finding container d77fd0aeb84f6da69c57ee7822ce1a80ead5d9cb6792f36313ec326ed3121855: Status 404 returned error can't find the container with id d77fd0aeb84f6da69c57ee7822ce1a80ead5d9cb6792f36313ec326ed3121855
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.048360 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-4vk27"]
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.075067 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:20 crc kubenswrapper[4877]: E0128 16:37:20.075567 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:20.575545747 +0000 UTC m=+144.133872635 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.146776 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq"]
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.177391 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:20 crc kubenswrapper[4877]: E0128 16:37:20.177679 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:20.677616133 +0000 UTC m=+144.235943021 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
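[Editor's note: the "Observed pod startup duration" entry above reports podStartSLOduration as the time from podCreationTimestamp to the observed running time, here 16:35:21 to 16:37:19.897..., about 118.9s. A small Go sketch of that arithmetic follows; the timestamps are copied from the log, and the parse layout (Go's default time format) is an assumption.]

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Timestamps taken from the "Observed pod startup duration" entry.
        const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
        created, err := time.Parse(layout, "2026-01-28 16:35:21 +0000 UTC")
        if err != nil {
            panic(err)
        }
        running, err := time.Parse(layout, "2026-01-28 16:37:19.897487723 +0000 UTC")
        if err != nil {
            panic(err)
        }
        // Prints 1m58.897487723s, matching podStartE2EDuration in the log.
        fmt.Println(running.Sub(created))
    }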
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.178177 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:20 crc kubenswrapper[4877]: E0128 16:37:20.187727 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:20.687698562 +0000 UTC m=+144.246025450 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.256264 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764" event={"ID":"1f30a6e5-e444-46ee-8756-cac33b69c05e","Type":"ContainerStarted","Data":"e1e208c07a387a7901f5e879cf39db35b604e69e38aca6555ea6bc4f60c26109"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.256346 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764" event={"ID":"1f30a6e5-e444-46ee-8756-cac33b69c05e","Type":"ContainerStarted","Data":"5dd9c095a930649db5cd45a4052aeea602fe2a1e09dd5d756bfe80909e9fe2dd"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.256590 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764"
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.260351 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-s295n" event={"ID":"2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2","Type":"ContainerStarted","Data":"4060662c5679c02849aa3dc9f5d56d2b83c30d75b30dd25278c3633099166543"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.267149 4877 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-ck764 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body=
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.267282 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764" podUID="1f30a6e5-e444-46ee-8756-cac33b69c05e" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused"
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.269808 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qjwb" event={"ID":"1d38cd56-3dfc-495c-9d3e-23e78467ce65","Type":"ContainerStarted","Data":"bae8a4e1620ec6b21e6c21326f6b598522d0148ff4c8c52f27a2bd32e413aed7"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.269860 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qjwb" event={"ID":"1d38cd56-3dfc-495c-9d3e-23e78467ce65","Type":"ContainerStarted","Data":"5a140efecee27ce760957a477fb58dd1ccb79df9164350bc2c3b42b021c76f3d"}
Jan 28 16:37:20 crc kubenswrapper[4877]: W0128 16:37:20.285197 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd4ddad9c_4170_48ae_9922_158233c9bce7.slice/crio-9f227f8db32db732c07e2a1b01dd9a5fe7974c10ee51e0fe671021398ddf9578 WatchSource:0}: Error finding container 9f227f8db32db732c07e2a1b01dd9a5fe7974c10ee51e0fe671021398ddf9578: Status 404 returned error can't find the container with id 9f227f8db32db732c07e2a1b01dd9a5fe7974c10ee51e0fe671021398ddf9578
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.291677 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:20 crc kubenswrapper[4877]: E0128 16:37:20.293058 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:20.793037874 +0000 UTC m=+144.351364752 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.315645 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rjg7v" event={"ID":"3160a1cd-84f3-4e54-ae03-537f1b441c3a","Type":"ContainerStarted","Data":"01790435c1bff6ed155a9715df59a69975370d80ba4c1652b62cbefe6c598c3b"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.321157 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-hw4qp" event={"ID":"137c3b13-a778-42d0-be21-e3a2052feeab","Type":"ContainerStarted","Data":"fa2ac8b922402fb8d8c1d567327d85ced27066ede9e0e233b8a507229c7fb11f"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.325683 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794" event={"ID":"c1d91c2d-c142-43ae-9563-e614d1c11c82","Type":"ContainerStarted","Data":"e8b2fb92a9c7ab5d720f94862f638f4eb03e5dedd562690caf79f40f00908a40"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.341515 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rjsfv" event={"ID":"135ced37-b13a-473b-950f-c1ce9567d15f","Type":"ContainerStarted","Data":"526e34551d85681ed31a0706959e8c2e004dcd9d35b90bd78d4a76644497d92e"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.348117 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp" event={"ID":"fbd8dc94-00b1-4aff-a395-72702a0db6c1","Type":"ContainerStarted","Data":"5b1482ef8e146322451204b9d779d6b8d15fc40f63fd931dc49fab333d382150"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.350677 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-whfj4" event={"ID":"01fc6775-f774-41c4-872e-dba5e6d80e10","Type":"ContainerStarted","Data":"2f7b2feb8a3607cad91f718ba88af53d7f50dec920a138c70a18fbe5264f51f1"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.352733 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-vbvr6" event={"ID":"0e667586-4bc0-4e00-9aec-fac9ad2b49ca","Type":"ContainerStarted","Data":"d84135ab28d590c176b24097a9005238ba4809366b824be3f0a2f389ccd31f30"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.357265 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq" event={"ID":"c8f60588-8859-46a0-94b7-77c176b03cc2","Type":"ContainerStarted","Data":"e596e184d0537d319a97c245e61ab2e7b76289973f05cb307be5a917ec120e67"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.359836 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b" event={"ID":"75ea5b20-f63e-438c-bddf-fdbcc5006672","Type":"ContainerStarted","Data":"bd2dfc78a904598fe5f1fd69917675fc464c221a7888ca885aaf551c12d919ca"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.363122 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd" event={"ID":"d030698d-e4b8-409b-aa4a-63fc20b94771","Type":"ContainerStarted","Data":"6f16ceefdcfc213b147d8a3267470d603253dcd90f592234ad997a4225fd0165"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.363163 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd" event={"ID":"d030698d-e4b8-409b-aa4a-63fc20b94771","Type":"ContainerStarted","Data":"19c1a54d47e6aeee6386136e8251ad4166cf646324d9303c57e423a65f669b00"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.365950 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-gb5km" event={"ID":"7725b803-8d54-401d-bb4c-4112e90ddc0b","Type":"ContainerStarted","Data":"14f1be4d0505c32bd1eeb391774c5eb808ae854133fdcc4cfb5ae08fbdae06e4"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.367251 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-gb5km"
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.368419 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-vpzx9" event={"ID":"754cf791-541c-4944-bf3e-7ba18f44d8de","Type":"ContainerStarted","Data":"d77fd0aeb84f6da69c57ee7822ce1a80ead5d9cb6792f36313ec326ed3121855"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.375196 4877 patch_prober.go:28] interesting pod/console-operator-58897d9998-gb5km container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.375367 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-gb5km" podUID="7725b803-8d54-401d-bb4c-4112e90ddc0b" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused"
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.377584 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2" event={"ID":"92f14a91-e41e-4b81-bec5-ea6cf4a3037a","Type":"ContainerStarted","Data":"0a681ed5943d49884a6c2ba7f6b8383b1917204e8ea81ed4c5c7237bf02a2f4f"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.377632 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2" event={"ID":"92f14a91-e41e-4b81-bec5-ea6cf4a3037a","Type":"ContainerStarted","Data":"533a28bf0d72c7beeb07528551b106f18b123db730fbfc16793a9184fd962232"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.389653 4877 generic.go:334] "Generic (PLEG): container finished" podID="e6299f68-b95a-4cf2-b4ea-5ab666b0f21d" containerID="c75901638749682bb7959749113a9724d26fdc8f588bfe3bce8e86651609db35" exitCode=0
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.389827 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" event={"ID":"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d","Type":"ContainerDied","Data":"c75901638749682bb7959749113a9724d26fdc8f588bfe3bce8e86651609db35"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.392918 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:20 crc kubenswrapper[4877]: E0128 16:37:20.393459 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:20.893433975 +0000 UTC m=+144.451761043 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.404254 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-wq5bz"]
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.408386 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" event={"ID":"fbd014e8-90c4-488c-88fa-b68493bebb36","Type":"ContainerStarted","Data":"ff23b8f1648663f3538c113718ece3ec3294440a49c298a8dba1cca56c42f78b"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.408461 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" event={"ID":"fbd014e8-90c4-488c-88fa-b68493bebb36","Type":"ContainerStarted","Data":"eceda3fa8e4496950b88d49b96597cc2fd173a5e7acc9700ddb2b6c04e8de563"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.409445 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z"
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.409495 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-8n6hm"]
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.413351 4877 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-xjv5z container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body=
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.413412 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" podUID="fbd014e8-90c4-488c-88fa-b68493bebb36" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused"
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.427175 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl" event={"ID":"b8d34290-e390-4c87-ac72-8f1142bff53c","Type":"ContainerStarted","Data":"1f4c34b8dc19bf6efdf88c0863633233445f0ea4436715d5371c8f80cec98978"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.431036 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" event={"ID":"048e6bc5-dad4-423f-a249-7c4addf02947","Type":"ContainerStarted","Data":"8d644d94c2afac7006a64eab855b5bc8333079bc941ad42f81f1a6889c3bab02"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.433331 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs" event={"ID":"d1397d20-a909-4e16-a962-f0dad9942a82","Type":"ContainerStarted","Data":"d48e10a090c856a4bfe8250aa2dbd0a0d2e85a5d3412c3a33820548af11102db"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.433390 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs" event={"ID":"d1397d20-a909-4e16-a962-f0dad9942a82","Type":"ContainerStarted","Data":"55dc8ca474509990047cf3d40140a4dedb42f02e27a11de907e681797020f71d"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.434961 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" event={"ID":"6a92f67e-224e-40a8-893d-edbe8dad2036","Type":"ContainerStarted","Data":"d94253d912f1bd972c05e0096e642e5723fbeda80e2d809a4b02287933c40e23"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.435799 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4c9wm" event={"ID":"cbcfea3d-b7da-4e43-86cf-7c32185eb863","Type":"ContainerStarted","Data":"63044d9cbbb0fba5e5bd5d959c422a16b02ef098b58c024e6684a7824e0c4b55"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.443743 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" event={"ID":"0c01a743-87ea-48f9-a8bd-69475721c4cc","Type":"ContainerStarted","Data":"3d4d34cbdf834ec2d4ce4c5e869879d4210d24859712d0470511c5d706495a6b"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.452358 4877 generic.go:334] "Generic (PLEG): container finished" podID="17940ca1-0215-4491-a9f9-9177b04180d5" containerID="fa3d35e9446f3c904b9b3c92fbb188c472a9f300ef9c011b0a9be28d3e6a6083" exitCode=0
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.454115 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx" event={"ID":"17940ca1-0215-4491-a9f9-9177b04180d5","Type":"ContainerDied","Data":"fa3d35e9446f3c904b9b3c92fbb188c472a9f300ef9c011b0a9be28d3e6a6083"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.454199 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx" event={"ID":"17940ca1-0215-4491-a9f9-9177b04180d5","Type":"ContainerStarted","Data":"141ffce53df87e946828263da23b181c3cd53e40564ec38aba1bbf66da425f9e"}
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.496058 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:20 crc kubenswrapper[4877]: E0128 16:37:20.500202 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:21.000161205 +0000 UTC m=+144.558488233 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.514358 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p" event={"ID":"eace4e5a-20a2-43f4-8bb7-d5ddb171de98","Type":"ContainerStarted","Data":"35671c9b0665cc155f8694b869578123647e9a0ba4b3ad7202d262c97f040455"} Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.520406 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd" event={"ID":"63015241-3e17-41fe-aa5d-1aa0b707970b","Type":"ContainerStarted","Data":"de24d3e9a93a832221d04b087277ae2ce7fc8f50baac25738f299fbd9d79226b"} Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.522709 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-dxbts"] Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.553808 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p"] Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.606576 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:20 crc kubenswrapper[4877]: E0128 16:37:20.607003 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:21.106983918 +0000 UTC m=+144.665310806 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.647897 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf"] Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.710044 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:20 crc kubenswrapper[4877]: E0128 16:37:20.710516 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:21.2104564 +0000 UTC m=+144.768783288 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.711104 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:20 crc kubenswrapper[4877]: E0128 16:37:20.711622 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:21.211601111 +0000 UTC m=+144.769927999 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.747139 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft"]
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.799220 4877 csr.go:261] certificate signing request csr-ndchl is approved, waiting to be issued
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.805856 4877 csr.go:257] certificate signing request csr-ndchl is issued
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.812628 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:20 crc kubenswrapper[4877]: E0128 16:37:20.813126 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:21.313108141 +0000 UTC m=+144.871435029 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.830763 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr"]
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.913778 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:20 crc kubenswrapper[4877]: E0128 16:37:20.917459 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:21.417433537 +0000 UTC m=+144.975760425 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:20 crc kubenswrapper[4877]: I0128 16:37:20.984899 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg"]
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.016723 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:21 crc kubenswrapper[4877]: E0128 16:37:21.017397 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:21.517366646 +0000 UTC m=+145.075693534 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:21 crc kubenswrapper[4877]: W0128 16:37:21.116840 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod362f8c1a_4938_4ee4_853b_8f868147d732.slice/crio-4a680b67f1f4aa6763655a2a44b4fa5faec84112d2ab3a66240580bc314a90ce WatchSource:0}: Error finding container 4a680b67f1f4aa6763655a2a44b4fa5faec84112d2ab3a66240580bc314a90ce: Status 404 returned error can't find the container with id 4a680b67f1f4aa6763655a2a44b4fa5faec84112d2ab3a66240580bc314a90ce
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.118107 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:21 crc kubenswrapper[4877]: E0128 16:37:21.118726 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:21.618708091 +0000 UTC m=+145.177034989 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:21 crc kubenswrapper[4877]: W0128 16:37:21.130932 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda6ee20f2_cf0c_470f_a536_9b6f0dfaee7f.slice/crio-43066955130ac46c49b03131dc74cd972d5ce1b7033d9640895da293091e4a40 WatchSource:0}: Error finding container 43066955130ac46c49b03131dc74cd972d5ce1b7033d9640895da293091e4a40: Status 404 returned error can't find the container with id 43066955130ac46c49b03131dc74cd972d5ce1b7033d9640895da293091e4a40
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.220379 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:21 crc kubenswrapper[4877]: E0128 16:37:21.221209 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:21.721179477 +0000 UTC m=+145.279506365 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.222074 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:21 crc kubenswrapper[4877]: E0128 16:37:21.222515 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:21.722498273 +0000 UTC m=+145.280825161 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
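The csr.go:261/csr.go:257 entries above record the kubelet's serving-certificate CSR, csr-ndchl, being approved and then issued. Below is a minimal client-go sketch of the same check from the API side; the kubeconfig path is a placeholder assumption, while the CSR name is taken from the log.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Kubeconfig path is a placeholder assumption for illustration.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// "csr-ndchl" is the CSR named in the csr.go entries above.
	csr, err := cs.CertificatesV1().CertificateSigningRequests().Get(context.TODO(), "csr-ndchl", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, c := range csr.Status.Conditions {
		fmt.Printf("condition=%v reason=%s message=%s\n", c.Type, c.Reason, c.Message)
	}
	// A populated Certificate field is what csr.go:257 reports as "is issued".
	fmt.Println("issued:", len(csr.Status.Certificate) > 0)
}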
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.294338 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764" podStartSLOduration=120.29431209 podStartE2EDuration="2m0.29431209s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:21.290908389 +0000 UTC m=+144.849235287" watchObservedRunningTime="2026-01-28 16:37:21.29431209 +0000 UTC m=+144.852638978"
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.320353 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qjwb" podStartSLOduration=120.320329305 podStartE2EDuration="2m0.320329305s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:21.318081694 +0000 UTC m=+144.876408592" watchObservedRunningTime="2026-01-28 16:37:21.320329305 +0000 UTC m=+144.878656193"
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.325025 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:21 crc kubenswrapper[4877]: E0128 16:37:21.325217 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:21.825167105 +0000 UTC m=+145.383493983 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.325713 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:21 crc kubenswrapper[4877]: E0128 16:37:21.326074 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:21.826067368 +0000 UTC m=+145.384394256 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.352314 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-gb5km" podStartSLOduration=120.352289838 podStartE2EDuration="2m0.352289838s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:21.351765005 +0000 UTC m=+144.910091893" watchObservedRunningTime="2026-01-28 16:37:21.352289838 +0000 UTC m=+144.910616726"
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.385778 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2" podStartSLOduration=120.385751292 podStartE2EDuration="2m0.385751292s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:21.383542573 +0000 UTC m=+144.941869451" watchObservedRunningTime="2026-01-28 16:37:21.385751292 +0000 UTC m=+144.944078180"
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.426972 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t"]
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.427032 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-mq2n8"]
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.427049 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564"]
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.428330 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" podStartSLOduration=120.428311878 podStartE2EDuration="2m0.428311878s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:21.425070332 +0000 UTC m=+144.983397220" watchObservedRunningTime="2026-01-28 16:37:21.428311878 +0000 UTC m=+144.986638766"
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.428388 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:21 crc kubenswrapper[4877]: E0128 16:37:21.430499 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:21.930455906 +0000 UTC m=+145.488782804 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.473024 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-6c6br"]
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.499373 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7mnfs" podStartSLOduration=120.499347275 podStartE2EDuration="2m0.499347275s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:21.497994169 +0000 UTC m=+145.056321057" watchObservedRunningTime="2026-01-28 16:37:21.499347275 +0000 UTC m=+145.057674163"
Jan 28 16:37:21 crc kubenswrapper[4877]: W0128 16:37:21.525658 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod587b4df1_7315_4a7f_b416_d2e3ff99fd0d.slice/crio-79be4f738d44f1a3c6bd8779b974ee354fea0a13df1ce72d84308f828f1781c3 WatchSource:0}: Error finding container 79be4f738d44f1a3c6bd8779b974ee354fea0a13df1ce72d84308f828f1781c3: Status 404 returned error can't find the container with id 79be4f738d44f1a3c6bd8779b974ee354fea0a13df1ce72d84308f828f1781c3
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.531836 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
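Every MountVolume/UnmountVolume retry above fails with the same root cause: kubevirt.io.hostpath-provisioner is not yet in the kubelet's list of registered CSI drivers. A hedged client-go sketch for inspecting that registration through the node's CSINode object follows; the kubeconfig path is a placeholder, and the node name "crc" is taken from the log prefix.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // placeholder path
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// The CSINode object mirrors the CSI drivers registered with a node's
	// kubelet; "crc" is the node name that appears in every log line above.
	n, err := cs.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	found := false
	for _, d := range n.Spec.Drivers {
		fmt.Println("registered driver:", d.Name)
		if d.Name == "kubevirt.io.hostpath-provisioner" {
			found = true
		}
	}
	fmt.Println("kubevirt.io.hostpath-provisioner present:", found)
}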
Jan 28 16:37:21 crc kubenswrapper[4877]: E0128 16:37:21.532238 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:22.032222772 +0000 UTC m=+145.590549660 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.553228 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-nphcd" podStartSLOduration=120.553208173 podStartE2EDuration="2m0.553208173s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:21.550881281 +0000 UTC m=+145.109208179" watchObservedRunningTime="2026-01-28 16:37:21.553208173 +0000 UTC m=+145.111535061"
Jan 28 16:37:21 crc kubenswrapper[4877]: W0128 16:37:21.592862 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1559cae1_e5e3_4f58_9412_d8bd25303099.slice/crio-76fe35752acfb3f93c057f10ff65578a3c3a7d1e62046d0c7ac5b9c9d822ff2e WatchSource:0}: Error finding container 76fe35752acfb3f93c057f10ff65578a3c3a7d1e62046d0c7ac5b9c9d822ff2e: Status 404 returned error can't find the container with id 76fe35752acfb3f93c057f10ff65578a3c3a7d1e62046d0c7ac5b9c9d822ff2e
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.594071 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" event={"ID":"a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f","Type":"ContainerStarted","Data":"43066955130ac46c49b03131dc74cd972d5ce1b7033d9640895da293091e4a40"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.600066 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p" event={"ID":"0e771245-5049-41d1-a51f-f46222cd686b","Type":"ContainerStarted","Data":"bbc934777b602f927f6eaacae4df175130dd9aee4842e93cc9c3b049eb1c355b"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.605632 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" event={"ID":"362f8c1a-4938-4ee4-853b-8f868147d732","Type":"ContainerStarted","Data":"4a680b67f1f4aa6763655a2a44b4fa5faec84112d2ab3a66240580bc314a90ce"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.613342 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf" event={"ID":"a7623e5a-223d-4da1-94fe-d671bfc4cb3d","Type":"ContainerStarted","Data":"1ef76540f875f4516797c6d16dabe2b1bd2b8e5b007ee3054d0ee5d7cd345996"}
Jan 28 16:37:21 crc kubenswrapper[4877]: W0128 16:37:21.615984 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7a651f66_8ee1_41d0_87a1_241bbedd4be8.slice/crio-3c65f93a8815200121bb88a86c65bc82bc07a422c9dd55e5223a0e1e7dc9535a WatchSource:0}: Error finding container 3c65f93a8815200121bb88a86c65bc82bc07a422c9dd55e5223a0e1e7dc9535a: Status 404 returned error can't find the container with id 3c65f93a8815200121bb88a86c65bc82bc07a422c9dd55e5223a0e1e7dc9535a
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.638292 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:21 crc kubenswrapper[4877]: E0128 16:37:21.638669 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:22.138654974 +0000 UTC m=+145.696981852 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.640998 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rjsfv" event={"ID":"135ced37-b13a-473b-950f-c1ce9567d15f","Type":"ContainerStarted","Data":"f37390086bb0d79cdc2750374069b8b4564a51bc69410d20f80d3c4413d397c6"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.642703 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-vbvr6" event={"ID":"0e667586-4bc0-4e00-9aec-fac9ad2b49ca","Type":"ContainerStarted","Data":"f1e389df6d6d7f0c358ccdb35e3c6c0d8d6cfa14d747685e983fb8a4f5ef929d"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.652999 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pz67p" podStartSLOduration=120.652971637 podStartE2EDuration="2m0.652971637s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:21.593671833 +0000 UTC m=+145.151998721" watchObservedRunningTime="2026-01-28 16:37:21.652971637 +0000 UTC m=+145.211298525"
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.672502 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b" event={"ID":"75ea5b20-f63e-438c-bddf-fdbcc5006672","Type":"ContainerStarted","Data":"a6b70207f38ebafba98b19092fb4e6ede3e97e81d32dafc82380dbdf4c8905b4"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.685820 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" event={"ID":"d4ddad9c-4170-48ae-9922-158233c9bce7","Type":"ContainerStarted","Data":"9923591c9d312342442ab14f7afdafeae1b66648501f8f11606606c231a5c954"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.685898 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" event={"ID":"d4ddad9c-4170-48ae-9922-158233c9bce7","Type":"ContainerStarted","Data":"9f227f8db32db732c07e2a1b01dd9a5fe7974c10ee51e0fe671021398ddf9578"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.694382 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-wq5bz" event={"ID":"92988ced-2e30-46f3-926b-9d8c8cd6f953","Type":"ContainerStarted","Data":"17a86cb0444659124ed7f8e4221be852f3822810aad188e8c29db97e9923ee0f"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.715245 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-vbvr6" podStartSLOduration=120.715228659 podStartE2EDuration="2m0.715228659s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:21.714730996 +0000 UTC m=+145.273057884" watchObservedRunningTime="2026-01-28 16:37:21.715228659 +0000 UTC m=+145.273555547"
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.718678 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4c9wm" event={"ID":"cbcfea3d-b7da-4e43-86cf-7c32185eb863","Type":"ContainerStarted","Data":"1694325343acd3c6e07d1b2f1d88e9de1c23d8b77806df34efc3d2cb2b0a25a4"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.729810 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t" event={"ID":"4ab01b2e-115e-4406-bfdf-9a9c2615ee5e","Type":"ContainerStarted","Data":"6c3bbcd72e37fa544b678ab0ef8075875b4f14237f381c312472f2c74074d0af"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.744919 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:21 crc kubenswrapper[4877]: E0128 16:37:21.747425 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:22.247396978 +0000 UTC m=+145.805724056 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.764261 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" event={"ID":"ffd807bc-800b-4108-b911-b8d41b86781d","Type":"ContainerStarted","Data":"087231b71d57bf986e478b3e0f0553cbf4afada7c836d694973f41fd1a65d7cb"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.765938 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" event={"ID":"d90e7c41-cead-4a7f-9369-425e27dccc26","Type":"ContainerStarted","Data":"797ed07296e847370ee4766ab1dd8f53b693f5587147b5caedb511ab1d38c268"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.810019 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-28 16:32:20 +0000 UTC, rotation deadline is 2026-10-26 14:11:18.76008079 +0000 UTC
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.810053 4877 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6501h33m56.950030319s for next certificate rotation
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.810647 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-zft2b" podStartSLOduration=120.810615706 podStartE2EDuration="2m0.810615706s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:21.809775414 +0000 UTC m=+145.368102302" watchObservedRunningTime="2026-01-28 16:37:21.810615706 +0000 UTC m=+145.368942594"
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.819120 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl" event={"ID":"b8d34290-e390-4c87-ac72-8f1142bff53c","Type":"ContainerStarted","Data":"ba8fdc07b844f79ae9d7d97324973aa0a30a0db1d680d6a11ec73e21728e93ab"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.853253 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq" event={"ID":"c8f60588-8859-46a0-94b7-77c176b03cc2","Type":"ContainerStarted","Data":"bb8aec0db6cb74e7da482c1501625654a467c544e8728b0c4d9ed1a7d16d0d05"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.853344 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.853837 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4c9wm" podStartSLOduration=120.85380979 podStartE2EDuration="2m0.85380979s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:21.852298679 +0000 UTC m=+145.410625567" watchObservedRunningTime="2026-01-28 16:37:21.85380979 +0000 UTC m=+145.412136678"
Jan 28 16:37:21 crc kubenswrapper[4877]: E0128 16:37:21.854335 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:22.354315323 +0000 UTC m=+145.912642211 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.859281 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-whfj4" event={"ID":"01fc6775-f774-41c4-872e-dba5e6d80e10","Type":"ContainerStarted","Data":"fdcaaf42922292a2614c4426936654a6ff76d10141f1260c301837ef9c3f37d2"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.860888 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.876604 4877 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-whfj4 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.14:6443/healthz\": dial tcp 10.217.0.14:6443: connect: connection refused" start-of-body=
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.876674 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-whfj4" podUID="01fc6775-f774-41c4-872e-dba5e6d80e10" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.14:6443/healthz\": dial tcp 10.217.0.14:6443: connect: connection refused"
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.900465 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" event={"ID":"6a92f67e-224e-40a8-893d-edbe8dad2036","Type":"ContainerStarted","Data":"dbf9d3e7d5bbaf2e290fbb65cc7684e1af3f3a60eeffb7ae0b777c27af2ceadd"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.901776 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27"
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.923950 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794" event={"ID":"c1d91c2d-c142-43ae-9563-e614d1c11c82","Type":"ContainerStarted","Data":"165bf26e1a515493aab6825cea69f75df5426fdc6e13f1b1c60db2bfcad75a7c"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.931660 4877 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-4vk27 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/healthz\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body=
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.931748 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" podUID="6a92f67e-224e-40a8-893d-edbe8dad2036" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.32:8080/healthz\": dial tcp 10.217.0.32:8080: connect: connection refused"
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.956333 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:21 crc kubenswrapper[4877]: E0128 16:37:21.959064 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:22.45904732 +0000 UTC m=+146.017374198 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.966771 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-vpzx9" event={"ID":"754cf791-541c-4944-bf3e-7ba18f44d8de","Type":"ContainerStarted","Data":"18305e3bcec5dc75e01995b478272009ce8a1c2f8ce1de8e49dd3506bc2fae1f"}
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.968919 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-vpzx9"
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.994839 4877 patch_prober.go:28] interesting pod/downloads-7954f5f757-vpzx9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body=
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.994927 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vpzx9" podUID="754cf791-541c-4944-bf3e-7ba18f44d8de" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused"
Jan 28 16:37:21 crc kubenswrapper[4877]: I0128 16:37:21.995160 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-hw4qp" event={"ID":"137c3b13-a778-42d0-be21-e3a2052feeab","Type":"ContainerStarted","Data":"d1323fb804bae16b4aeb941ec96d6e98ab5f839af8a3466c9f757efca4781816"}
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.007336 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sljbl" podStartSLOduration=121.007305679 podStartE2EDuration="2m1.007305679s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:21.959731208 +0000 UTC m=+145.518058096" watchObservedRunningTime="2026-01-28 16:37:22.007305679 +0000 UTC m=+145.565632567"
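The pod_startup_latency_tracker entries report podStartSLOduration as watchObservedRunningTime minus podCreationTimestamp when, as here, no image pulls were recorded (both pull timestamps are the zero time). A small Go check of that arithmetic against the kube-controller-manager-operator entry directly above:

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	// Values copied from the kube-controller-manager-operator entry above.
	created, err := time.Parse(layout, "2026-01-28 16:35:21 +0000 UTC")
	if err != nil {
		panic(err)
	}
	watched, err := time.Parse(layout, "2026-01-28 16:37:22.007305679 +0000 UTC")
	if err != nil {
		panic(err)
	}
	// podStartSLOduration here is watchObservedRunningTime - podCreationTimestamp:
	// 16:37:22.007305679 - 16:35:21 = 121.007305679s, matching the logged value.
	fmt.Printf("podStartSLOduration = %.9fs\n", watched.Sub(created).Seconds())
}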
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.007740 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" podStartSLOduration=121.00773176 podStartE2EDuration="2m1.00773176s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:22.004859933 +0000 UTC m=+145.563186821" watchObservedRunningTime="2026-01-28 16:37:22.00773176 +0000 UTC m=+145.566058668"
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.017570 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp" event={"ID":"fbd8dc94-00b1-4aff-a395-72702a0db6c1","Type":"ContainerStarted","Data":"e38df2f463a4f849d127bcc8b0a7271853f0cd16c0cfcce9111358ee6b759fc8"}
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.018869 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp"
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.020804 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" event={"ID":"50a9b1f2-6b58-4cde-90ed-8553152736e0","Type":"ContainerStarted","Data":"87df0a9100b1da02cddd7a1b195c0ea203a1828e91afc69274be30cfb8954eae"}
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.032698 4877 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wnxxp container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:5443/healthz\": dial tcp 10.217.0.36:5443: connect: connection refused" start-of-body=
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.032777 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp" podUID="fbd8dc94-00b1-4aff-a395-72702a0db6c1" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.36:5443/healthz\": dial tcp 10.217.0.36:5443: connect: connection refused"
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.047824 4877 generic.go:334] "Generic (PLEG): container finished" podID="0c01a743-87ea-48f9-a8bd-69475721c4cc" containerID="588f35664b70b986bbf4675bd43258887bac08e9f66ec990a1121dbd895b912d" exitCode=0
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.048697 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-whfj4" podStartSLOduration=121.048666382 podStartE2EDuration="2m1.048666382s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:22.04519335 +0000 UTC m=+145.603520238" watchObservedRunningTime="2026-01-28 16:37:22.048666382 +0000 UTC m=+145.606993270"
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.049861 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" event={"ID":"0c01a743-87ea-48f9-a8bd-69475721c4cc","Type":"ContainerDied","Data":"588f35664b70b986bbf4675bd43258887bac08e9f66ec990a1121dbd895b912d"}
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.062255 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-gb5km"
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.062664 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:22 crc kubenswrapper[4877]: E0128 16:37:22.064524 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:22.564502776 +0000 UTC m=+146.122829664 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.064744 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z"
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.064947 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764"
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.070385 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cq794" podStartSLOduration=121.070355052 podStartE2EDuration="2m1.070355052s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:22.070237659 +0000 UTC m=+145.628564547" watchObservedRunningTime="2026-01-28 16:37:22.070355052 +0000 UTC m=+145.628681940"
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.114737 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fclsq" podStartSLOduration=121.114700596 podStartE2EDuration="2m1.114700596s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:22.10737625 +0000 UTC m=+145.665703158" watchObservedRunningTime="2026-01-28 16:37:22.114700596 +0000 UTC m=+145.673027634"
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.172522 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:22 crc kubenswrapper[4877]: E0128 16:37:22.176346 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:22.676327351 +0000 UTC m=+146.234654439 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.179075 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp" podStartSLOduration=121.179046824 podStartE2EDuration="2m1.179046824s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:22.171508693 +0000 UTC m=+145.729835581" watchObservedRunningTime="2026-01-28 16:37:22.179046824 +0000 UTC m=+145.737373712"
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.192081 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-hw4qp" podStartSLOduration=6.192051242 podStartE2EDuration="6.192051242s" podCreationTimestamp="2026-01-28 16:37:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:22.19165205 +0000 UTC m=+145.749978938" watchObservedRunningTime="2026-01-28 16:37:22.192051242 +0000 UTC m=+145.750378130"
Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.275455 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:22 crc kubenswrapper[4877]: E0128 16:37:22.276196 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:22.776168678 +0000 UTC m=+146.334495566 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
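Each failed volume operation above is re-queued by nestedpendingoperations with durationBeforeRetry 500ms. The sketch below illustrates the same retry-with-backoff pattern using the apimachinery wait helpers; it is an illustration of the pattern under that assumption, not the kubelet's actual retry code.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Mirror of the log's retry cadence: wait 500ms before the first retry and
	// grow the delay on repeated failures of the same operation.
	backoff := wait.Backoff{Duration: 500 * time.Millisecond, Factor: 2.0, Steps: 4}
	attempt := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempt++
		fmt.Printf("attempt %d: driver kubevirt.io.hostpath-provisioner not registered yet\n", attempt)
		return false, nil // not done, no hard error: retry after the backoff delay
	})
	// Once Steps are exhausted the helper gives up with a timeout error.
	fmt.Println("result:", err)
}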
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.320385 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-vpzx9" podStartSLOduration=121.320348807 podStartE2EDuration="2m1.320348807s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:22.272847908 +0000 UTC m=+145.831174796" watchObservedRunningTime="2026-01-28 16:37:22.320348807 +0000 UTC m=+145.878675695" Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.377731 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:22 crc kubenswrapper[4877]: E0128 16:37:22.378253 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:22.878233543 +0000 UTC m=+146.436560431 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.407946 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.416929 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.417014 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.479250 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:22 crc kubenswrapper[4877]: E0128 16:37:22.479713 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:22.979693162 +0000 UTC m=+146.538020050 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.582188 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:22 crc kubenswrapper[4877]: E0128 16:37:22.582888 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:23.082868437 +0000 UTC m=+146.641195325 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.683913 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:22 crc kubenswrapper[4877]: E0128 16:37:22.684457 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:23.184432558 +0000 UTC m=+146.742759446 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.785737 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:22 crc kubenswrapper[4877]: E0128 16:37:22.786161 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:23.286147535 +0000 UTC m=+146.844474413 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.887563 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:22 crc kubenswrapper[4877]: E0128 16:37:22.888560 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:23.388536218 +0000 UTC m=+146.946863096 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:22 crc kubenswrapper[4877]: I0128 16:37:22.990115 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:22 crc kubenswrapper[4877]: E0128 16:37:22.990545 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:23.490527362 +0000 UTC m=+147.048854260 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.099514 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:23 crc kubenswrapper[4877]: E0128 16:37:23.099872 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:23.599852671 +0000 UTC m=+147.158179559 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.155985 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rjsfv" event={"ID":"135ced37-b13a-473b-950f-c1ce9567d15f","Type":"ContainerStarted","Data":"d0941a0b2bfa828c089185520ef98e44bb44b99e0a95d94e22bd57628499f790"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.201075 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:23 crc kubenswrapper[4877]: E0128 16:37:23.201943 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:23.701909076 +0000 UTC m=+147.260235954 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.217111 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p" event={"ID":"0e771245-5049-41d1-a51f-f46222cd686b","Type":"ContainerStarted","Data":"0a5fd21d1f43c267072017fde505f5359b3b34c82ce3054a63c1b8cc299fceb5"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.261082 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx" event={"ID":"17940ca1-0215-4491-a9f9-9177b04180d5","Type":"ContainerStarted","Data":"be04c7707496d959ce3b42ec77a4952825090c71cf8c7b2c8553a5b1d87b6fd4"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.262250 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx"
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.289630 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" event={"ID":"362f8c1a-4938-4ee4-853b-8f868147d732","Type":"ContainerStarted","Data":"f4a1bb9d723db085fcc9bd2da9fed57cbcb72f32292466b36b2fa05977e915ee"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.292405 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr"
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.305983 4877 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-n9qwr container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" start-of-body=
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.306067 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" podUID="362f8c1a-4938-4ee4-853b-8f868147d732" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused"
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.306154 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:23 crc kubenswrapper[4877]: E0128 16:37:23.307355 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:23.807333791 +0000 UTC m=+147.365660679 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.311414 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-rjsfv" podStartSLOduration=122.3113901 podStartE2EDuration="2m2.3113901s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:23.184744258 +0000 UTC m=+146.743071146" watchObservedRunningTime="2026-01-28 16:37:23.3113901 +0000 UTC m=+146.869716988"
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.315691 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-6c6br" event={"ID":"7a651f66-8ee1-41d0-87a1-241bbedd4be8","Type":"ContainerStarted","Data":"e47d633c2ce4b526c6b279729f5deb5da082c388560a5dec40d39bef3907b788"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.315804 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-6c6br" event={"ID":"7a651f66-8ee1-41d0-87a1-241bbedd4be8","Type":"ContainerStarted","Data":"3c65f93a8815200121bb88a86c65bc82bc07a422c9dd55e5223a0e1e7dc9535a"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.389437 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" podStartSLOduration=122.389414753 podStartE2EDuration="2m2.389414753s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:23.376686493 +0000 UTC m=+146.935013381" watchObservedRunningTime="2026-01-28 16:37:23.389414753 +0000 UTC m=+146.947741641"
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.390184 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx" podStartSLOduration=122.390176963 podStartE2EDuration="2m2.390176963s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:23.313004592 +0000 UTC m=+146.871331480" watchObservedRunningTime="2026-01-28 16:37:23.390176963 +0000 UTC m=+146.948503851"
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.416730 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-rjg7v" event={"ID":"3160a1cd-84f3-4e54-ae03-537f1b441c3a","Type":"ContainerStarted","Data":"a9f380e13b94e40630cd958f4b1f814b3881fca986bea84543a7c4c6fa185f2e"}
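
The repeating MountVolume/UnmountVolume failures above share one root cause: the kubelet has not yet seen a node registration for the CSI driver name kubevirt.io.hostpath-provisioner, so every attacher/unmounter call fails before it can even reach the plugin. Driver registration is node-local (the plugin's registration socket is picked up by the kubelet plugin manager) and is mirrored in the cluster-scoped CSINode object, which makes CSINode a convenient place to check from outside the node. A minimal Go sketch, assuming client-go, a reachable kubeconfig in $KUBECONFIG, and the node name crc taken from this log:

    package main

    import (
    	"context"
    	"fmt"
    	"os"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Assumptions: kubeconfig path and node name; adjust for your cluster.
    	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
    	if err != nil {
    		panic(err)
    	}
    	cs, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		panic(err)
    	}
    	// CSINode lists the drivers that have completed node registration.
    	csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
    	if err != nil {
    		panic(err)
    	}
    	for _, d := range csiNode.Spec.Drivers {
    		fmt.Printf("registered driver: %s (nodeID %s)\n", d.Name, d.NodeID)
    	}
    }

Until kubevirt.io.hostpath-provisioner shows up in that list (the csi-hostpathplugin pod only starts its containers later in this log), the kubelet can do nothing but requeue the volume operations, which is exactly the 500ms retry pattern visible here.
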
event={"ID":"3160a1cd-84f3-4e54-ae03-537f1b441c3a","Type":"ContainerStarted","Data":"56e092ae0f9f63b1d70eb5802a9b52fcb5d487d4f6dd8fd368b2e6757bec41cf"} Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.419788 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:23 crc kubenswrapper[4877]: E0128 16:37:23.421731 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:23.921709925 +0000 UTC m=+147.480036813 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.433871 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 16:37:23 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld Jan 28 16:37:23 crc kubenswrapper[4877]: [+]process-running ok Jan 28 16:37:23 crc kubenswrapper[4877]: healthz check failed Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.433926 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.434160 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" event={"ID":"a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f","Type":"ContainerStarted","Data":"aa5c8208cdb45cb3c10d7424d58678b7024bf8e7cf3c2c8771907c422f8161d2"} Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.436173 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.436229 4877 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-6q4hg container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: connect: connection refused" start-of-body= Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.436247 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" podUID="a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": dial tcp 10.217.0.34:8443: 
connect: connection refused" Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.436986 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-rjg7v" podStartSLOduration=122.436954943 podStartE2EDuration="2m2.436954943s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:23.433734556 +0000 UTC m=+146.992061504" watchObservedRunningTime="2026-01-28 16:37:23.436954943 +0000 UTC m=+146.995281831" Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.453898 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t" event={"ID":"4ab01b2e-115e-4406-bfdf-9a9c2615ee5e","Type":"ContainerStarted","Data":"15e5a05391591ab429af73d5f742e2c0fc7b45525700328b9b085f2359d989d3"} Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.485622 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-s295n" event={"ID":"2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2","Type":"ContainerStarted","Data":"626503d703dfdd68083d39a763ea812ad03c974a0f5c5a2a68cd1fbb6dd9961f"} Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.485710 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-s295n" Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.516848 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" podStartSLOduration=122.516808155 podStartE2EDuration="2m2.516808155s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:23.511842682 +0000 UTC m=+147.070169570" watchObservedRunningTime="2026-01-28 16:37:23.516808155 +0000 UTC m=+147.075135053" Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.527913 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:23 crc kubenswrapper[4877]: E0128 16:37:23.529041 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:24.029025561 +0000 UTC m=+147.587352449 (durationBeforeRetry 500ms). 
Jan 28 16:37:23 crc kubenswrapper[4877]: E0128 16:37:23.529041 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:24.029025561 +0000 UTC m=+147.587352449 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.555161 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" event={"ID":"d90e7c41-cead-4a7f-9369-425e27dccc26","Type":"ContainerStarted","Data":"b04df84db06cc885a32bcb1b05c842ffa6a2da4eaf31b4cf2a45e5cbca2bad83"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.589847 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-mq2n8" event={"ID":"1559cae1-e5e3-4f58-9412-d8bd25303099","Type":"ContainerStarted","Data":"82f74b9e08597826005a372e4654db1635295d6cbfaad3717aa90b61c7d31b9d"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.589909 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-mq2n8" event={"ID":"1559cae1-e5e3-4f58-9412-d8bd25303099","Type":"ContainerStarted","Data":"76fe35752acfb3f93c057f10ff65578a3c3a7d1e62046d0c7ac5b9c9d822ff2e"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.604806 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" event={"ID":"ffd807bc-800b-4108-b911-b8d41b86781d","Type":"ContainerStarted","Data":"1f52dcac5e6b412c28bee34b13da678a829ed91d8b29014afe4145dcd94dba65"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.630580 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:23 crc kubenswrapper[4877]: E0128 16:37:23.633103 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:24.133083329 +0000 UTC m=+147.691410217 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.649412 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" event={"ID":"50a9b1f2-6b58-4cde-90ed-8553152736e0","Type":"ContainerStarted","Data":"cbc39401969275a804f9372b45c76f6fd8455cbd132d17fd6757d50aa64a39a5"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.707831 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf" event={"ID":"a7623e5a-223d-4da1-94fe-d671bfc4cb3d","Type":"ContainerStarted","Data":"5fa9393df711435717c3e4d673c049db38148b89faeb90f4352005c3694b2d52"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.708100 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-x8q7t" podStartSLOduration=122.708078851 podStartE2EDuration="2m2.708078851s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:23.619308152 +0000 UTC m=+147.177635040" watchObservedRunningTime="2026-01-28 16:37:23.708078851 +0000 UTC m=+147.266405739"
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.708971 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf"
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.732106 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:23 crc kubenswrapper[4877]: E0128 16:37:23.733322 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:24.233296895 +0000 UTC m=+147.791623783 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.748085 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" event={"ID":"0c01a743-87ea-48f9-a8bd-69475721c4cc","Type":"ContainerStarted","Data":"af32560fc645cba103e975916ea03ad052ef773ccc447b81a5109fe374e74c06"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.767686 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-wq5bz" event={"ID":"92988ced-2e30-46f3-926b-9d8c8cd6f953","Type":"ContainerStarted","Data":"6cb61400ff7cea02143c5b177921d7ba79803e03eb197a43b0cdaed5ce47975c"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.779817 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-s295n" podStartSLOduration=7.7797821769999995 podStartE2EDuration="7.779782177s" podCreationTimestamp="2026-01-28 16:37:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:23.71698702 +0000 UTC m=+147.275313908" watchObservedRunningTime="2026-01-28 16:37:23.779782177 +0000 UTC m=+147.338109065"
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.791281 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" event={"ID":"d4ddad9c-4170-48ae-9922-158233c9bce7","Type":"ContainerStarted","Data":"38224bf09e3e112da0db909728d45f6af012eae2cd6abb019a7b5a93191a0154"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.834651 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:23 crc kubenswrapper[4877]: E0128 16:37:23.837355 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:24.337341324 +0000 UTC m=+147.895668212 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.839907 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" podStartSLOduration=122.839893882 podStartE2EDuration="2m2.839893882s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:23.779711155 +0000 UTC m=+147.338038053" watchObservedRunningTime="2026-01-28 16:37:23.839893882 +0000 UTC m=+147.398220770"
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.840588 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" podStartSLOduration=122.84058419 podStartE2EDuration="2m2.84058419s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:23.839253474 +0000 UTC m=+147.397580362" watchObservedRunningTime="2026-01-28 16:37:23.84058419 +0000 UTC m=+147.398911078"
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.845837 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd" event={"ID":"63015241-3e17-41fe-aa5d-1aa0b707970b","Type":"ContainerStarted","Data":"8ad79259b4163d0b8fc4ec95dbfa602ccc802a4fd18b2178c17ed67262d0c353"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.873343 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" event={"ID":"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d","Type":"ContainerStarted","Data":"65403745a10fbd9bd67caaeadee6e7109d032d824a8c5be3c31ed6fb8ae46e8e"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.908009 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-mq2n8" podStartSLOduration=122.907985199 podStartE2EDuration="2m2.907985199s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:23.905069312 +0000 UTC m=+147.463396210" watchObservedRunningTime="2026-01-28 16:37:23.907985199 +0000 UTC m=+147.466312077"
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.911563 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564" event={"ID":"587b4df1-7315-4a7f-b416-d2e3ff99fd0d","Type":"ContainerStarted","Data":"ed97ff0eaabbe956455402550e7465babb30e5600133984b95a9c60955aa8927"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.911620 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564" event={"ID":"587b4df1-7315-4a7f-b416-d2e3ff99fd0d","Type":"ContainerStarted","Data":"79be4f738d44f1a3c6bd8779b974ee354fea0a13df1ce72d84308f828f1781c3"}
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.922647 4877 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-4vk27 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/healthz\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body=
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.922649 4877 patch_prober.go:28] interesting pod/downloads-7954f5f757-vpzx9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body=
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.922719 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" podUID="6a92f67e-224e-40a8-893d-edbe8dad2036" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.32:8080/healthz\": dial tcp 10.217.0.32:8080: connect: connection refused"
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.922799 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vpzx9" podUID="754cf791-541c-4944-bf3e-7ba18f44d8de" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused"
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.927873 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.939106 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:23 crc kubenswrapper[4877]: E0128 16:37:23.940394 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:24.440377414 +0000 UTC m=+147.998704302 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.954178 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf" podStartSLOduration=122.954158782 podStartE2EDuration="2m2.954158782s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:23.952900729 +0000 UTC m=+147.511227617" watchObservedRunningTime="2026-01-28 16:37:23.954158782 +0000 UTC m=+147.512485670"
Jan 28 16:37:23 crc kubenswrapper[4877]: I0128 16:37:23.990845 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-dxbts" podStartSLOduration=122.990815001 podStartE2EDuration="2m2.990815001s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:23.989842766 +0000 UTC m=+147.548169654" watchObservedRunningTime="2026-01-28 16:37:23.990815001 +0000 UTC m=+147.549141889"
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.044135 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
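
The pod_startup_latency_tracker lines are worth decoding: podStartSLOduration is the SLI-style startup latency (observed running time minus creation time, with time spent pulling images excluded), while podStartE2EDuration is the raw wall-clock difference. In every entry here the pull timestamps are the zero time (0001-01-01 ...), meaning no pull was observed, so the two durations coincide at roughly 2m2s: these pods were created at 16:35:21 and were first seen running around 16:37:23. A small sketch of that arithmetic, assuming this interpretation of the fields:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	const layout = "2006-01-02 15:04:05 -0700 MST"
    	created, _ := time.Parse(layout, "2026-01-28 16:35:21 +0000 UTC")
    	running, _ := time.Parse(layout, "2026-01-28 16:37:23.3113901 +0000 UTC")

    	// Zero pull timestamps stand for "no image pull observed".
    	var firstPull, lastPull time.Time

    	e2e := running.Sub(created)
    	slo := e2e
    	if !firstPull.IsZero() && !lastPull.IsZero() {
    		slo -= lastPull.Sub(firstPull) // exclude pull time from the SLO figure
    	}
    	fmt.Println(e2e, slo) // 2m2.3113901s 2m2.3113901s, matching the migrator entry above
    }

The ~2m gap itself is consistent with the volume story: pods created at 16:35:21 could not become ready until their dependencies (including the still-unregistered CSI driver and each other's endpoints) caught up after kubelet restart.
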
Jan 28 16:37:24 crc kubenswrapper[4877]: E0128 16:37:24.050756 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:24.550736271 +0000 UTC m=+148.109063159 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.145395 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:24 crc kubenswrapper[4877]: E0128 16:37:24.145650 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:24.645608565 +0000 UTC m=+148.203935443 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.146328 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:24 crc kubenswrapper[4877]: E0128 16:37:24.147056 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:24.647047713 +0000 UTC m=+148.205374591 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.179129 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-qlrhq" podStartSLOduration=123.179104109 podStartE2EDuration="2m3.179104109s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:24.178784221 +0000 UTC m=+147.737111109" watchObservedRunningTime="2026-01-28 16:37:24.179104109 +0000 UTC m=+147.737430997"
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.259715 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:24 crc kubenswrapper[4877]: E0128 16:37:24.260160 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:24.760144743 +0000 UTC m=+148.318471621 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.287075 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564" podStartSLOduration=123.287043461 podStartE2EDuration="2m3.287043461s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:24.258276503 +0000 UTC m=+147.816603391" watchObservedRunningTime="2026-01-28 16:37:24.287043461 +0000 UTC m=+147.845370349"
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.330297 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp"
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.349955 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4" podStartSLOduration=123.34991693 podStartE2EDuration="2m3.34991693s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:24.289093795 +0000 UTC m=+147.847420683" watchObservedRunningTime="2026-01-28 16:37:24.34991693 +0000 UTC m=+147.908243818"
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.365386 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:24 crc kubenswrapper[4877]: E0128 16:37:24.365793 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:24.865779873 +0000 UTC m=+148.424106761 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.408707 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-wq5bz" podStartSLOduration=8.408669169 podStartE2EDuration="8.408669169s" podCreationTimestamp="2026-01-28 16:37:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:24.351056131 +0000 UTC m=+147.909383019" watchObservedRunningTime="2026-01-28 16:37:24.408669169 +0000 UTC m=+147.966996057"
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.414527 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 16:37:24 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld
Jan 28 16:37:24 crc kubenswrapper[4877]: [+]process-running ok
Jan 28 16:37:24 crc kubenswrapper[4877]: healthz check failed
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.414603 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.441537 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-s4swd" podStartSLOduration=123.441518766 podStartE2EDuration="2m3.441518766s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:24.438369432 +0000 UTC m=+147.996696320" watchObservedRunningTime="2026-01-28 16:37:24.441518766 +0000 UTC m=+147.999845654"
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.443075 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" podStartSLOduration=124.443067687 podStartE2EDuration="2m4.443067687s" podCreationTimestamp="2026-01-28 16:35:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:24.409709027 +0000 UTC m=+147.968035915" watchObservedRunningTime="2026-01-28 16:37:24.443067687 +0000 UTC m=+148.001394575"
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.467280 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:24 crc kubenswrapper[4877]: E0128 16:37:24.467733 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:24.967685755 +0000 UTC m=+148.526012643 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.569069 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:24 crc kubenswrapper[4877]: E0128 16:37:24.570005 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:25.069988666 +0000 UTC m=+148.628315544 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.672046 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:24 crc kubenswrapper[4877]: E0128 16:37:24.672822 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:25.17277092 +0000 UTC m=+148.731097808 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.774555 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:24 crc kubenswrapper[4877]: E0128 16:37:24.775175 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:25.275142145 +0000 UTC m=+148.833469213 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.876361 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:24 crc kubenswrapper[4877]: E0128 16:37:24.876674 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:25.376624734 +0000 UTC m=+148.934951622 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.877059 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:24 crc kubenswrapper[4877]: E0128 16:37:24.877601 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:25.37758085 +0000 UTC m=+148.935907738 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.931597 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" event={"ID":"048e6bc5-dad4-423f-a249-7c4addf02947","Type":"ContainerStarted","Data":"91ae02c126218cc63d39302f5a304590b4f7736b771575431f7f8097973b60cf"}
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.931684 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" event={"ID":"048e6bc5-dad4-423f-a249-7c4addf02947","Type":"ContainerStarted","Data":"5ef897f5f87c4676966ab92b6e7d8ca3d60cac489d2cf1424d6cd9c8150788d8"}
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.953443 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p" event={"ID":"0e771245-5049-41d1-a51f-f46222cd686b","Type":"ContainerStarted","Data":"4be5f2c4de6da3932093245da9d5a07a94277c2f7b82c10d4915fa30541c5bb0"}
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.977299 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" event={"ID":"e6299f68-b95a-4cf2-b4ea-5ab666b0f21d","Type":"ContainerStarted","Data":"e8dcb8c8c53929c090bfe68d65f93f918601d8f9c11dcf4bb2d271d3ed4cf064"}
Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.978088 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
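
Two things are worth noting at this point in the stream. First, the shape of the retry loop: each failed MountDevice/TearDown is parked in nestedpendingoperations behind a "No retries permitted until <now+500ms>" gate, and the reconciler simply re-queues the operation on its next pass. Second, the turning point: the csi-hostpathplugin-tjn2b containers have just started, so driver registration (and with it a successful mount) is now only a matter of time. A toy version of such a per-operation cooldown gate, offered as a hedged sketch (the real kubelet code additionally tracks operation generations and can back off exponentially; all names here are invented):

    package main

    import (
    	"fmt"
    	"time"
    )

    // retryGate remembers, per operation key, the earliest time a retry is allowed.
    type retryGate struct {
    	notBefore map[string]time.Time
    	cooldown  time.Duration
    }

    func newRetryGate(cooldown time.Duration) *retryGate {
    	return &retryGate{notBefore: map[string]time.Time{}, cooldown: cooldown}
    }

    // tryStart reports whether the key is outside its cooldown window.
    func (g *retryGate) tryStart(key string) bool {
    	until, ok := g.notBefore[key]
    	return !ok || !time.Now().Before(until)
    }

    // fail records a failure and opens a new cooldown window, analogous to
    // "No retries permitted until ... (durationBeforeRetry 500ms)".
    func (g *retryGate) fail(key string) {
    	g.notBefore[key] = time.Now().Add(g.cooldown)
    }

    func main() {
    	g := newRetryGate(500 * time.Millisecond)
    	key := "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8"
    	g.fail(key)
    	fmt.Println(g.tryStart(key)) // false: inside the 500ms window
    	time.Sleep(600 * time.Millisecond)
    	fmt.Println(g.tryStart(key)) // true: the window has passed
    }
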
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:25.478432293 +0000 UTC m=+149.036759181 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.978656 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:24 crc kubenswrapper[4877]: E0128 16:37:24.979133 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:25.47908699 +0000 UTC m=+149.037413878 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.984391 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mvwft" event={"ID":"ffd807bc-800b-4108-b911-b8d41b86781d","Type":"ContainerStarted","Data":"c8cd1d271ac93aba7d16d4d579f23c1764e6c3e793959894410087d1d322962c"} Jan 28 16:37:24 crc kubenswrapper[4877]: I0128 16:37:24.989620 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-s295n" event={"ID":"2b9a77c6-3ca1-4697-8e5c-d98a7158c5d2","Type":"ContainerStarted","Data":"a8b31fae6e0839533810c893508ed721dca9e0cdc9e55b72bb8a12c68f5d84c6"} Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.010802 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-8n6hm" event={"ID":"d90e7c41-cead-4a7f-9369-425e27dccc26","Type":"ContainerStarted","Data":"d92285a916c803de8cf8c79ea2be0499f048c30441bb11cc9de3282455240931"} Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.020026 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mdr8p" podStartSLOduration=124.020002332 podStartE2EDuration="2m4.020002332s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:25.019730165 +0000 UTC m=+148.578057063" 
watchObservedRunningTime="2026-01-28 16:37:25.020002332 +0000 UTC m=+148.578329220" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.020270 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf" event={"ID":"a7623e5a-223d-4da1-94fe-d671bfc4cb3d","Type":"ContainerStarted","Data":"db6d3792d05abbcfb8faff5aadd4a33c368e01a5b4ac654ce06b9c5a44e25c3a"} Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.047990 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-6c6br" event={"ID":"7a651f66-8ee1-41d0-87a1-241bbedd4be8","Type":"ContainerStarted","Data":"76bb493df08a037eebb2bf42070be117dd3336443d5cbf392a19ec52494242ed"} Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.053784 4877 patch_prober.go:28] interesting pod/downloads-7954f5f757-vpzx9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.053871 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vpzx9" podUID="754cf791-541c-4944-bf3e-7ba18f44d8de" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.058225 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.075847 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.083191 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:25 crc kubenswrapper[4877]: E0128 16:37:25.084411 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:25.584389631 +0000 UTC m=+149.142716519 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.103341 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.109187 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-6c6br" podStartSLOduration=124.109166473 podStartE2EDuration="2m4.109166473s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:25.102749912 +0000 UTC m=+148.661076800" watchObservedRunningTime="2026-01-28 16:37:25.109166473 +0000 UTC m=+148.667493361" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.186744 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.186846 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.186890 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.187078 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.187150 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:37:25 crc kubenswrapper[4877]: E0128 16:37:25.193081 4877 nestedpendingoperations.go:348] 
Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:25.693059313 +0000 UTC m=+149.251386371 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.232568 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.251180 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.259222 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.285245 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.294560 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:25 crc kubenswrapper[4877]: E0128 16:37:25.295045 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:25.795022726 +0000 UTC m=+149.353349614 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.362058 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.390840 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.401456 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:25 crc kubenswrapper[4877]: E0128 16:37:25.401937 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:25.90191813 +0000 UTC m=+149.460245018 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.411317 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 16:37:25 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld Jan 28 16:37:25 crc kubenswrapper[4877]: [+]process-running ok Jan 28 16:37:25 crc kubenswrapper[4877]: healthz check failed Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.411386 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.447825 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.504254 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:25 crc kubenswrapper[4877]: E0128 16:37:25.504631 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:26.004608593 +0000 UTC m=+149.562935481 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.606518 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:25 crc kubenswrapper[4877]: E0128 16:37:25.606935 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:26.106917394 +0000 UTC m=+149.665244282 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.712198 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:25 crc kubenswrapper[4877]: E0128 16:37:25.712442 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:26.212418151 +0000 UTC m=+149.770745039 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.712551 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:25 crc kubenswrapper[4877]: E0128 16:37:25.713001 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:26.212992346 +0000 UTC m=+149.771319234 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.822165 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.838079 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.836392 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:25 crc kubenswrapper[4877]: E0128 16:37:25.836494 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:26.336456803 +0000 UTC m=+149.894783691 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.838795 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:25 crc kubenswrapper[4877]: E0128 16:37:25.839372 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:26.339364121 +0000 UTC m=+149.897691009 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.863175 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.867558 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.887569 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.942190 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.942450 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b20edd5d-4283-441b-9c08-7fe7d530e0af-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"b20edd5d-4283-441b-9c08-7fe7d530e0af\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 16:37:25 crc kubenswrapper[4877]: I0128 16:37:25.942569 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b20edd5d-4283-441b-9c08-7fe7d530e0af-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"b20edd5d-4283-441b-9c08-7fe7d530e0af\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 16:37:25 crc kubenswrapper[4877]: 
E0128 16:37:25.942735 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:26.44271757 +0000 UTC m=+150.001044458 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.046217 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b20edd5d-4283-441b-9c08-7fe7d530e0af-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"b20edd5d-4283-441b-9c08-7fe7d530e0af\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.046702 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.046742 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b20edd5d-4283-441b-9c08-7fe7d530e0af-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"b20edd5d-4283-441b-9c08-7fe7d530e0af\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.047111 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b20edd5d-4283-441b-9c08-7fe7d530e0af-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"b20edd5d-4283-441b-9c08-7fe7d530e0af\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 16:37:26 crc kubenswrapper[4877]: E0128 16:37:26.047404 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:26.547391265 +0000 UTC m=+150.105718153 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.058709 4877 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pwhsx container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.058794 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx" podUID="17940ca1-0215-4491-a9f9-9177b04180d5" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.085277 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-prvss"] Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.086381 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-prvss" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.099347 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.112890 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" event={"ID":"048e6bc5-dad4-423f-a249-7c4addf02947","Type":"ContainerStarted","Data":"9922d8ab97514cb04e2bc61b52e706dae11ec2d9a87c11968898471467d3f6bb"} Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.124572 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-prvss"] Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.149344 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.149681 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvclq\" (UniqueName: \"kubernetes.io/projected/fa400ca9-c7cc-482b-af01-6743a80710fe-kube-api-access-kvclq\") pod \"community-operators-prvss\" (UID: \"fa400ca9-c7cc-482b-af01-6743a80710fe\") " pod="openshift-marketplace/community-operators-prvss" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.149749 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa400ca9-c7cc-482b-af01-6743a80710fe-utilities\") pod \"community-operators-prvss\" (UID: \"fa400ca9-c7cc-482b-af01-6743a80710fe\") " 
pod="openshift-marketplace/community-operators-prvss" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.149766 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa400ca9-c7cc-482b-af01-6743a80710fe-catalog-content\") pod \"community-operators-prvss\" (UID: \"fa400ca9-c7cc-482b-af01-6743a80710fe\") " pod="openshift-marketplace/community-operators-prvss" Jan 28 16:37:26 crc kubenswrapper[4877]: E0128 16:37:26.149909 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:26.649886982 +0000 UTC m=+150.208213870 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.169729 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b20edd5d-4283-441b-9c08-7fe7d530e0af-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"b20edd5d-4283-441b-9c08-7fe7d530e0af\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.241059 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.272425 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sszxr"] Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.274190 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.274325 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvclq\" (UniqueName: \"kubernetes.io/projected/fa400ca9-c7cc-482b-af01-6743a80710fe-kube-api-access-kvclq\") pod \"community-operators-prvss\" (UID: \"fa400ca9-c7cc-482b-af01-6743a80710fe\") " pod="openshift-marketplace/community-operators-prvss" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.274747 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa400ca9-c7cc-482b-af01-6743a80710fe-utilities\") pod \"community-operators-prvss\" (UID: \"fa400ca9-c7cc-482b-af01-6743a80710fe\") " pod="openshift-marketplace/community-operators-prvss" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.274791 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa400ca9-c7cc-482b-af01-6743a80710fe-catalog-content\") pod \"community-operators-prvss\" (UID: \"fa400ca9-c7cc-482b-af01-6743a80710fe\") " pod="openshift-marketplace/community-operators-prvss" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.275878 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa400ca9-c7cc-482b-af01-6743a80710fe-catalog-content\") pod \"community-operators-prvss\" (UID: \"fa400ca9-c7cc-482b-af01-6743a80710fe\") " pod="openshift-marketplace/community-operators-prvss" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.282177 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:37:26 crc kubenswrapper[4877]: E0128 16:37:26.287418 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:26.787395504 +0000 UTC m=+150.345722392 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.296539 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.296766 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sszxr"] Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.306760 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa400ca9-c7cc-482b-af01-6743a80710fe-utilities\") pod \"community-operators-prvss\" (UID: \"fa400ca9-c7cc-482b-af01-6743a80710fe\") " pod="openshift-marketplace/community-operators-prvss" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.379528 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.380063 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d139104-17f5-47de-a21d-08340d961df3-catalog-content\") pod \"certified-operators-sszxr\" (UID: \"8d139104-17f5-47de-a21d-08340d961df3\") " pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.380109 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cq772\" (UniqueName: \"kubernetes.io/projected/8d139104-17f5-47de-a21d-08340d961df3-kube-api-access-cq772\") pod \"certified-operators-sszxr\" (UID: \"8d139104-17f5-47de-a21d-08340d961df3\") " pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.380175 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d139104-17f5-47de-a21d-08340d961df3-utilities\") pod \"certified-operators-sszxr\" (UID: \"8d139104-17f5-47de-a21d-08340d961df3\") " pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:37:26 crc kubenswrapper[4877]: E0128 16:37:26.380326 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:26.880302765 +0000 UTC m=+150.438629653 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.466502 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvclq\" (UniqueName: \"kubernetes.io/projected/fa400ca9-c7cc-482b-af01-6743a80710fe-kube-api-access-kvclq\") pod \"community-operators-prvss\" (UID: \"fa400ca9-c7cc-482b-af01-6743a80710fe\") " pod="openshift-marketplace/community-operators-prvss" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.488287 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-prvss" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.495955 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 16:37:26 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld Jan 28 16:37:26 crc kubenswrapper[4877]: [+]process-running ok Jan 28 16:37:26 crc kubenswrapper[4877]: healthz check failed Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.496075 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.506182 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d139104-17f5-47de-a21d-08340d961df3-utilities\") pod \"certified-operators-sszxr\" (UID: \"8d139104-17f5-47de-a21d-08340d961df3\") " pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.506697 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d139104-17f5-47de-a21d-08340d961df3-catalog-content\") pod \"certified-operators-sszxr\" (UID: \"8d139104-17f5-47de-a21d-08340d961df3\") " pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.506742 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.506782 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cq772\" (UniqueName: \"kubernetes.io/projected/8d139104-17f5-47de-a21d-08340d961df3-kube-api-access-cq772\") pod \"certified-operators-sszxr\" (UID: \"8d139104-17f5-47de-a21d-08340d961df3\") " pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:37:26 crc 
kubenswrapper[4877]: I0128 16:37:26.507144 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d139104-17f5-47de-a21d-08340d961df3-utilities\") pod \"certified-operators-sszxr\" (UID: \"8d139104-17f5-47de-a21d-08340d961df3\") " pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.507411 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d139104-17f5-47de-a21d-08340d961df3-catalog-content\") pod \"certified-operators-sszxr\" (UID: \"8d139104-17f5-47de-a21d-08340d961df3\") " pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:37:26 crc kubenswrapper[4877]: E0128 16:37:26.507725 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:27.007709397 +0000 UTC m=+150.566036285 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.523948 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.574779 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cq772\" (UniqueName: \"kubernetes.io/projected/8d139104-17f5-47de-a21d-08340d961df3-kube-api-access-cq772\") pod \"certified-operators-sszxr\" (UID: \"8d139104-17f5-47de-a21d-08340d961df3\") " pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.584487 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2m2k4"] Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.586176 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2m2k4" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.608790 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:26 crc kubenswrapper[4877]: E0128 16:37:26.609692 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:27.109676049 +0000 UTC m=+150.668002937 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.664716 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2m2k4"] Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.710704 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.712084 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f76979dc-a93e-496f-b9ac-e3f0710c2899-utilities\") pod \"community-operators-2m2k4\" (UID: \"f76979dc-a93e-496f-b9ac-e3f0710c2899\") " pod="openshift-marketplace/community-operators-2m2k4" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.712150 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f76979dc-a93e-496f-b9ac-e3f0710c2899-catalog-content\") pod \"community-operators-2m2k4\" (UID: \"f76979dc-a93e-496f-b9ac-e3f0710c2899\") " pod="openshift-marketplace/community-operators-2m2k4" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.712179 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqc5k\" (UniqueName: \"kubernetes.io/projected/f76979dc-a93e-496f-b9ac-e3f0710c2899-kube-api-access-jqc5k\") pod \"community-operators-2m2k4\" (UID: \"f76979dc-a93e-496f-b9ac-e3f0710c2899\") " pod="openshift-marketplace/community-operators-2m2k4" Jan 28 16:37:26 crc kubenswrapper[4877]: E0128 16:37:26.715126 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:27.215105544 +0000 UTC m=+150.773432432 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.772511 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-q5lsk"] Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.787956 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.795014 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q5lsk" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.813245 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.813598 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f76979dc-a93e-496f-b9ac-e3f0710c2899-utilities\") pod \"community-operators-2m2k4\" (UID: \"f76979dc-a93e-496f-b9ac-e3f0710c2899\") " pod="openshift-marketplace/community-operators-2m2k4" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.813649 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f76979dc-a93e-496f-b9ac-e3f0710c2899-catalog-content\") pod \"community-operators-2m2k4\" (UID: \"f76979dc-a93e-496f-b9ac-e3f0710c2899\") " pod="openshift-marketplace/community-operators-2m2k4" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.813677 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqc5k\" (UniqueName: \"kubernetes.io/projected/f76979dc-a93e-496f-b9ac-e3f0710c2899-kube-api-access-jqc5k\") pod \"community-operators-2m2k4\" (UID: \"f76979dc-a93e-496f-b9ac-e3f0710c2899\") " pod="openshift-marketplace/community-operators-2m2k4" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.813725 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db5392ea-c535-46b8-80cf-7fc0b43bf1de-utilities\") pod \"certified-operators-q5lsk\" (UID: \"db5392ea-c535-46b8-80cf-7fc0b43bf1de\") " pod="openshift-marketplace/certified-operators-q5lsk" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.813795 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79wvd\" (UniqueName: \"kubernetes.io/projected/db5392ea-c535-46b8-80cf-7fc0b43bf1de-kube-api-access-79wvd\") pod \"certified-operators-q5lsk\" (UID: \"db5392ea-c535-46b8-80cf-7fc0b43bf1de\") " pod="openshift-marketplace/certified-operators-q5lsk" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.813828 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db5392ea-c535-46b8-80cf-7fc0b43bf1de-catalog-content\") pod \"certified-operators-q5lsk\" (UID: \"db5392ea-c535-46b8-80cf-7fc0b43bf1de\") " pod="openshift-marketplace/certified-operators-q5lsk" Jan 28 16:37:26 crc kubenswrapper[4877]: E0128 16:37:26.814008 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:27.313983195 +0000 UTC m=+150.872310083 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.814579 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f76979dc-a93e-496f-b9ac-e3f0710c2899-utilities\") pod \"community-operators-2m2k4\" (UID: \"f76979dc-a93e-496f-b9ac-e3f0710c2899\") " pod="openshift-marketplace/community-operators-2m2k4" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.814637 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f76979dc-a93e-496f-b9ac-e3f0710c2899-catalog-content\") pod \"community-operators-2m2k4\" (UID: \"f76979dc-a93e-496f-b9ac-e3f0710c2899\") " pod="openshift-marketplace/community-operators-2m2k4" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.819881 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q5lsk"] Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.862883 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqc5k\" (UniqueName: \"kubernetes.io/projected/f76979dc-a93e-496f-b9ac-e3f0710c2899-kube-api-access-jqc5k\") pod \"community-operators-2m2k4\" (UID: \"f76979dc-a93e-496f-b9ac-e3f0710c2899\") " pod="openshift-marketplace/community-operators-2m2k4" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.915548 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db5392ea-c535-46b8-80cf-7fc0b43bf1de-utilities\") pod \"certified-operators-q5lsk\" (UID: \"db5392ea-c535-46b8-80cf-7fc0b43bf1de\") " pod="openshift-marketplace/certified-operators-q5lsk" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.915621 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.915653 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79wvd\" (UniqueName: \"kubernetes.io/projected/db5392ea-c535-46b8-80cf-7fc0b43bf1de-kube-api-access-79wvd\") pod \"certified-operators-q5lsk\" (UID: \"db5392ea-c535-46b8-80cf-7fc0b43bf1de\") " pod="openshift-marketplace/certified-operators-q5lsk" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.915675 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db5392ea-c535-46b8-80cf-7fc0b43bf1de-catalog-content\") pod \"certified-operators-q5lsk\" (UID: \"db5392ea-c535-46b8-80cf-7fc0b43bf1de\") " pod="openshift-marketplace/certified-operators-q5lsk" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.916180 4877 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db5392ea-c535-46b8-80cf-7fc0b43bf1de-catalog-content\") pod \"certified-operators-q5lsk\" (UID: \"db5392ea-c535-46b8-80cf-7fc0b43bf1de\") " pod="openshift-marketplace/certified-operators-q5lsk" Jan 28 16:37:26 crc kubenswrapper[4877]: I0128 16:37:26.916529 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db5392ea-c535-46b8-80cf-7fc0b43bf1de-utilities\") pod \"certified-operators-q5lsk\" (UID: \"db5392ea-c535-46b8-80cf-7fc0b43bf1de\") " pod="openshift-marketplace/certified-operators-q5lsk" Jan 28 16:37:26 crc kubenswrapper[4877]: E0128 16:37:26.916869 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:27.416851241 +0000 UTC m=+150.975178129 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.017712 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.018124 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2m2k4" Jan 28 16:37:27 crc kubenswrapper[4877]: E0128 16:37:27.017968 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:27.51793772 +0000 UTC m=+151.076264608 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.028098 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:27 crc kubenswrapper[4877]: E0128 16:37:27.028713 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:27.528691708 +0000 UTC m=+151.087018596 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.053670 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79wvd\" (UniqueName: \"kubernetes.io/projected/db5392ea-c535-46b8-80cf-7fc0b43bf1de-kube-api-access-79wvd\") pod \"certified-operators-q5lsk\" (UID: \"db5392ea-c535-46b8-80cf-7fc0b43bf1de\") " pod="openshift-marketplace/certified-operators-q5lsk" Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.133011 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:27 crc kubenswrapper[4877]: E0128 16:37:27.133422 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:27.633404974 +0000 UTC m=+151.191731862 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.135659 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"73a50b3459e38e9112fa46f96499c19aa086b9a84a455f26e4249e0eff5e0d1a"} Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.148531 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"4bc70371f6f40c8f1ac718eb89f0592b9b11ea21d506b62da4e5e1481239a290"} Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.151167 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q5lsk" Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.235084 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:27 crc kubenswrapper[4877]: E0128 16:37:27.238928 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:27.73890483 +0000 UTC m=+151.297231718 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.335966 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 16:37:27 crc kubenswrapper[4877]: E0128 16:37:27.336512 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:27.836463196 +0000 UTC m=+151.394790084 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.398427 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.411720 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 16:37:27 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld
Jan 28 16:37:27 crc kubenswrapper[4877]: [+]process-running ok
Jan 28 16:37:27 crc kubenswrapper[4877]: healthz check failed
Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.411801 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.438329 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:27 crc kubenswrapper[4877]: E0128 16:37:27.438670 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:27.938656805 +0000 UTC m=+151.496983693 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.448197 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-prvss"]
Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.543625 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:27 crc kubenswrapper[4877]: E0128 16:37:27.544041 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:28.043981577 +0000 UTC m=+151.602308465 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.544142 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:27 crc kubenswrapper[4877]: E0128 16:37:27.544610 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:28.044593813 +0000 UTC m=+151.602920691 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.557773 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sszxr"]
Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.584762 4877 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.645176 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:27 crc kubenswrapper[4877]: E0128 16:37:27.645668 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:28.145636471 +0000 UTC m=+151.703963359 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.750344 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:27 crc kubenswrapper[4877]: E0128 16:37:27.751264 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:28.251238321 +0000 UTC m=+151.809565219 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.852120 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:27 crc kubenswrapper[4877]: E0128 16:37:27.853073 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:28.35305215 +0000 UTC m=+151.911379038 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.853123 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:27 crc kubenswrapper[4877]: E0128 16:37:27.853526 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:28.353520142 +0000 UTC m=+151.911847030 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.931582 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q5lsk"]
Jan 28 16:37:27 crc kubenswrapper[4877]: I0128 16:37:27.954521 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:27 crc kubenswrapper[4877]: E0128 16:37:27.954971 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:28.454952961 +0000 UTC m=+152.013279849 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.040602 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-d2vhl"]
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.042075 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d2vhl"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.048192 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.073573 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a7c4fb9-52e4-4736-9165-b793c332af0d-catalog-content\") pod \"redhat-marketplace-d2vhl\" (UID: \"4a7c4fb9-52e4-4736-9165-b793c332af0d\") " pod="openshift-marketplace/redhat-marketplace-d2vhl"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.073659 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.073794 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a7c4fb9-52e4-4736-9165-b793c332af0d-utilities\") pod \"redhat-marketplace-d2vhl\" (UID: \"4a7c4fb9-52e4-4736-9165-b793c332af0d\") " pod="openshift-marketplace/redhat-marketplace-d2vhl"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.073860 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fm9m\" (UniqueName: \"kubernetes.io/projected/4a7c4fb9-52e4-4736-9165-b793c332af0d-kube-api-access-7fm9m\") pod \"redhat-marketplace-d2vhl\" (UID: \"4a7c4fb9-52e4-4736-9165-b793c332af0d\") " pod="openshift-marketplace/redhat-marketplace-d2vhl"
Jan 28 16:37:28 crc kubenswrapper[4877]: E0128 16:37:28.074410 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:28.574375429 +0000 UTC m=+152.132702317 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.113009 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2vhl"]
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.139679 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2m2k4"]
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.157929 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2m2k4" event={"ID":"f76979dc-a93e-496f-b9ac-e3f0710c2899","Type":"ContainerStarted","Data":"a3c484671da2999b4e9e579b600bd5f3904e3e7d93200aac3f3063a24c231f4a"}
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.159613 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5lsk" event={"ID":"db5392ea-c535-46b8-80cf-7fc0b43bf1de","Type":"ContainerStarted","Data":"e5067ed33fba89eb609844e65fb6431d0d88d65abba7e21a29190ed4875d7a53"}
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.161279 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"2ed2c0c6c9fda8612703bd4e741920a80efa2414625baee20d1a063f3f7d8e8c"}
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.161308 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"c415cfc0f6f891ed0852a858b40408c664be51975a012e90f548b425c089ebb1"}
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.165119 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" event={"ID":"048e6bc5-dad4-423f-a249-7c4addf02947","Type":"ContainerStarted","Data":"90c8c2fed2233ad4f30c1f91a8bc8e8bb537e3d5ae4ab69f1cc608b26bb593a8"}
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.169400 4877 generic.go:334] "Generic (PLEG): container finished" podID="8d139104-17f5-47de-a21d-08340d961df3" containerID="e2a2591e6df0223533c5b5d0af67cf2a920b1cf1752bc415d01141985cd56390" exitCode=0
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.169458 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sszxr" event={"ID":"8d139104-17f5-47de-a21d-08340d961df3","Type":"ContainerDied","Data":"e2a2591e6df0223533c5b5d0af67cf2a920b1cf1752bc415d01141985cd56390"}
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.169497 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sszxr" event={"ID":"8d139104-17f5-47de-a21d-08340d961df3","Type":"ContainerStarted","Data":"7c72810bfa3fe755db96251cd41729804708bd209c7e59a8dd3b5484e68f6e46"}
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.174583 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.174739 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a7c4fb9-52e4-4736-9165-b793c332af0d-utilities\") pod \"redhat-marketplace-d2vhl\" (UID: \"4a7c4fb9-52e4-4736-9165-b793c332af0d\") " pod="openshift-marketplace/redhat-marketplace-d2vhl"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.174789 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fm9m\" (UniqueName: \"kubernetes.io/projected/4a7c4fb9-52e4-4736-9165-b793c332af0d-kube-api-access-7fm9m\") pod \"redhat-marketplace-d2vhl\" (UID: \"4a7c4fb9-52e4-4736-9165-b793c332af0d\") " pod="openshift-marketplace/redhat-marketplace-d2vhl"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.174812 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a7c4fb9-52e4-4736-9165-b793c332af0d-catalog-content\") pod \"redhat-marketplace-d2vhl\" (UID: \"4a7c4fb9-52e4-4736-9165-b793c332af0d\") " pod="openshift-marketplace/redhat-marketplace-d2vhl"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.176788 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"b20edd5d-4283-441b-9c08-7fe7d530e0af","Type":"ContainerStarted","Data":"ceb91492f9060ecec975544d975e99bd9bbe38f8486a2ef8bb8693aafb33e907"}
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.177384 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a7c4fb9-52e4-4736-9165-b793c332af0d-utilities\") pod \"redhat-marketplace-d2vhl\" (UID: \"4a7c4fb9-52e4-4736-9165-b793c332af0d\") " pod="openshift-marketplace/redhat-marketplace-d2vhl"
Jan 28 16:37:28 crc kubenswrapper[4877]: E0128 16:37:28.177414 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:28.677373389 +0000 UTC m=+152.235700417 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.177764 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a7c4fb9-52e4-4736-9165-b793c332af0d-catalog-content\") pod \"redhat-marketplace-d2vhl\" (UID: \"4a7c4fb9-52e4-4736-9165-b793c332af0d\") " pod="openshift-marketplace/redhat-marketplace-d2vhl"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.179254 4877 generic.go:334] "Generic (PLEG): container finished" podID="587b4df1-7315-4a7f-b416-d2e3ff99fd0d" containerID="ed97ff0eaabbe956455402550e7465babb30e5600133984b95a9c60955aa8927" exitCode=0
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.179361 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564" event={"ID":"587b4df1-7315-4a7f-b416-d2e3ff99fd0d","Type":"ContainerDied","Data":"ed97ff0eaabbe956455402550e7465babb30e5600133984b95a9c60955aa8927"}
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.179971 4877 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.190974 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"9a8a98d301eee3b8fc7a369c62e7d811c6967c8fe34dbc19894a5f9077d956bb"}
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.198848 4877 generic.go:334] "Generic (PLEG): container finished" podID="fa400ca9-c7cc-482b-af01-6743a80710fe" containerID="da8d3eb10e1942ab7188f6d5f3b49658d08145201a253415184f19b6bfc3b33d" exitCode=0
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.199012 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-prvss" event={"ID":"fa400ca9-c7cc-482b-af01-6743a80710fe","Type":"ContainerDied","Data":"da8d3eb10e1942ab7188f6d5f3b49658d08145201a253415184f19b6bfc3b33d"}
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.199045 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-prvss" event={"ID":"fa400ca9-c7cc-482b-af01-6743a80710fe","Type":"ContainerStarted","Data":"9fefed3eabfff26ed0cea8955fd0508c1bace142abaed454b0d598d384413bfa"}
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.203312 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"8c8508ec0b14f5bdc1c61ff319dbc0fcfbaa49f61f49ad316fa3adb503ff158e"}
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.203460 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.205826 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fm9m\" (UniqueName: \"kubernetes.io/projected/4a7c4fb9-52e4-4736-9165-b793c332af0d-kube-api-access-7fm9m\") pod \"redhat-marketplace-d2vhl\" (UID: \"4a7c4fb9-52e4-4736-9165-b793c332af0d\") " pod="openshift-marketplace/redhat-marketplace-d2vhl"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.222769 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" podStartSLOduration=12.222746351 podStartE2EDuration="12.222746351s" podCreationTimestamp="2026-01-28 16:37:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:28.221321733 +0000 UTC m=+151.779648621" watchObservedRunningTime="2026-01-28 16:37:28.222746351 +0000 UTC m=+151.781073239"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.257505 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d2vhl"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.262299 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-f5npr"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.262928 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-f5npr"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.271819 4877 patch_prober.go:28] interesting pod/console-f9d7485db-f5npr container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.16:8443/health\": dial tcp 10.217.0.16:8443: connect: connection refused" start-of-body=
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.271890 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-f5npr" podUID="ae95a71e-8f5b-45ac-b6e7-a78e2258de80" containerName="console" probeResult="failure" output="Get \"https://10.217.0.16:8443/health\": dial tcp 10.217.0.16:8443: connect: connection refused"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.276937 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:28 crc kubenswrapper[4877]: E0128 16:37:28.279832 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:28.779810995 +0000 UTC m=+152.338137883 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.377556 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:28 crc kubenswrapper[4877]: E0128 16:37:28.377729 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:28.877703129 +0000 UTC m=+152.436030017 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.378110 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:28 crc kubenswrapper[4877]: E0128 16:37:28.378589 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 16:37:28.878579572 +0000 UTC m=+152.436906460 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kzc7h" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.401139 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.401204 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-2m5lt"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.413211 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 16:37:28 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld
Jan 28 16:37:28 crc kubenswrapper[4877]: [+]process-running ok
Jan 28 16:37:28 crc kubenswrapper[4877]: healthz check failed
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.413295 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.413349 4877 patch_prober.go:28] interesting pod/apiserver-76f77b778f-2m5lt container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Jan 28 16:37:28 crc kubenswrapper[4877]: [+]log ok
Jan 28 16:37:28 crc kubenswrapper[4877]: [+]etcd ok
Jan 28 16:37:28 crc kubenswrapper[4877]: [+]poststarthook/start-apiserver-admission-initializer ok
Jan 28 16:37:28 crc kubenswrapper[4877]: [+]poststarthook/generic-apiserver-start-informers ok
Jan 28 16:37:28 crc kubenswrapper[4877]: [+]poststarthook/max-in-flight-filter ok
Jan 28 16:37:28 crc kubenswrapper[4877]: [+]poststarthook/storage-object-count-tracker-hook ok
Jan 28 16:37:28 crc kubenswrapper[4877]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Jan 28 16:37:28 crc kubenswrapper[4877]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Jan 28 16:37:28 crc kubenswrapper[4877]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Jan 28 16:37:28 crc kubenswrapper[4877]: [+]poststarthook/project.openshift.io-projectcache ok
Jan 28 16:37:28 crc kubenswrapper[4877]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Jan 28 16:37:28 crc kubenswrapper[4877]: [+]poststarthook/openshift.io-startinformers ok
Jan 28 16:37:28 crc kubenswrapper[4877]: [+]poststarthook/openshift.io-restmapperupdater ok
Jan 28 16:37:28 crc kubenswrapper[4877]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Jan 28 16:37:28 crc kubenswrapper[4877]: livez check failed
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.413459 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" podUID="e6299f68-b95a-4cf2-b4ea-5ab666b0f21d" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.418582 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.418631 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.427344 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.448598 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hx6kq"]
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.457527 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hx6kq"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.474663 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hx6kq"]
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.482271 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.482481 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cad628ad-2502-408b-ab7a-4a5be2d1637f-catalog-content\") pod \"redhat-marketplace-hx6kq\" (UID: \"cad628ad-2502-408b-ab7a-4a5be2d1637f\") " pod="openshift-marketplace/redhat-marketplace-hx6kq"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.482564 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cad628ad-2502-408b-ab7a-4a5be2d1637f-utilities\") pod \"redhat-marketplace-hx6kq\" (UID: \"cad628ad-2502-408b-ab7a-4a5be2d1637f\") " pod="openshift-marketplace/redhat-marketplace-hx6kq"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.482694 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tth6\" (UniqueName: \"kubernetes.io/projected/cad628ad-2502-408b-ab7a-4a5be2d1637f-kube-api-access-6tth6\") pod \"redhat-marketplace-hx6kq\" (UID: \"cad628ad-2502-408b-ab7a-4a5be2d1637f\") " pod="openshift-marketplace/redhat-marketplace-hx6kq"
Jan 28 16:37:28 crc kubenswrapper[4877]: E0128 16:37:28.483647 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 16:37:28.983630947 +0000 UTC m=+152.541957835 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.536336 4877 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-28T16:37:27.584992301Z","Handler":null,"Name":""}
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.551883 4877 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.551949 4877 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.589434 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cad628ad-2502-408b-ab7a-4a5be2d1637f-catalog-content\") pod \"redhat-marketplace-hx6kq\" (UID: \"cad628ad-2502-408b-ab7a-4a5be2d1637f\") " pod="openshift-marketplace/redhat-marketplace-hx6kq"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.589559 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.589612 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cad628ad-2502-408b-ab7a-4a5be2d1637f-utilities\") pod \"redhat-marketplace-hx6kq\" (UID: \"cad628ad-2502-408b-ab7a-4a5be2d1637f\") " pod="openshift-marketplace/redhat-marketplace-hx6kq"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.589747 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tth6\" (UniqueName: \"kubernetes.io/projected/cad628ad-2502-408b-ab7a-4a5be2d1637f-kube-api-access-6tth6\") pod \"redhat-marketplace-hx6kq\" (UID: \"cad628ad-2502-408b-ab7a-4a5be2d1637f\") " pod="openshift-marketplace/redhat-marketplace-hx6kq"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.589902 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cad628ad-2502-408b-ab7a-4a5be2d1637f-catalog-content\") pod \"redhat-marketplace-hx6kq\" (UID: \"cad628ad-2502-408b-ab7a-4a5be2d1637f\") " pod="openshift-marketplace/redhat-marketplace-hx6kq"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.590958 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cad628ad-2502-408b-ab7a-4a5be2d1637f-utilities\") pod \"redhat-marketplace-hx6kq\" (UID: \"cad628ad-2502-408b-ab7a-4a5be2d1637f\") " pod="openshift-marketplace/redhat-marketplace-hx6kq"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.603527 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.603594 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.613885 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tth6\" (UniqueName: \"kubernetes.io/projected/cad628ad-2502-408b-ab7a-4a5be2d1637f-kube-api-access-6tth6\") pod \"redhat-marketplace-hx6kq\" (UID: \"cad628ad-2502-408b-ab7a-4a5be2d1637f\") " pod="openshift-marketplace/redhat-marketplace-hx6kq"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.618787 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2vhl"]
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.634979 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kzc7h\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.690516 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.701852 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.712849 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:28 crc kubenswrapper[4877]: I0128 16:37:28.802331 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hx6kq"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.014233 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kzc7h"]
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.076220 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hx6kq"]
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.217135 4877 generic.go:334] "Generic (PLEG): container finished" podID="b20edd5d-4283-441b-9c08-7fe7d530e0af" containerID="f12957b1f4603f4c87d00c480b2ec130d5465c0eb9be1543d5c463a9e2dd1f8f" exitCode=0
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.217209 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"b20edd5d-4283-441b-9c08-7fe7d530e0af","Type":"ContainerDied","Data":"f12957b1f4603f4c87d00c480b2ec130d5465c0eb9be1543d5c463a9e2dd1f8f"}
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.227749 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hx6kq" event={"ID":"cad628ad-2502-408b-ab7a-4a5be2d1637f","Type":"ContainerStarted","Data":"c8cc4035ce257bb0d479e24f813d3f36a76ac2386d4e6afcdd14483be805a8e9"}
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.232006 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vxfw7"]
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.233630 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" event={"ID":"3d4b7ce0-783a-44b4-9604-8ef0d398fec7","Type":"ContainerStarted","Data":"f703bfafa2e1fd1c7931f27789a95b3bcb62f15e2a9299f79e3c60a1ae86b063"}
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.233895 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vxfw7"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.239819 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.244712 4877 generic.go:334] "Generic (PLEG): container finished" podID="f76979dc-a93e-496f-b9ac-e3f0710c2899" containerID="4ecbb57cfc6762eb2718d3abc1ad3dbde27716281882acf872a2bc51569d1b46" exitCode=0
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.245047 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2m2k4" event={"ID":"f76979dc-a93e-496f-b9ac-e3f0710c2899","Type":"ContainerDied","Data":"4ecbb57cfc6762eb2718d3abc1ad3dbde27716281882acf872a2bc51569d1b46"}
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.246661 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vxfw7"]
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.285782 4877 generic.go:334] "Generic (PLEG): container finished" podID="db5392ea-c535-46b8-80cf-7fc0b43bf1de" containerID="96b9c13db08a7b85231276ce730f5127e88f0afbf56d92336c60d16a4ac02fe1" exitCode=0
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.287328 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5lsk" event={"ID":"db5392ea-c535-46b8-80cf-7fc0b43bf1de","Type":"ContainerDied","Data":"96b9c13db08a7b85231276ce730f5127e88f0afbf56d92336c60d16a4ac02fe1"}
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.290936 4877 generic.go:334] "Generic (PLEG): container finished" podID="4a7c4fb9-52e4-4736-9165-b793c332af0d" containerID="6ed33e262f684582244fad98f3992f33fe9aa017f18232b41ebe43a2f64b7204" exitCode=0
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.292024 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2vhl" event={"ID":"4a7c4fb9-52e4-4736-9165-b793c332af0d","Type":"ContainerDied","Data":"6ed33e262f684582244fad98f3992f33fe9aa017f18232b41ebe43a2f64b7204"}
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.292070 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2vhl" event={"ID":"4a7c4fb9-52e4-4736-9165-b793c332af0d","Type":"ContainerStarted","Data":"917adc26030adbd57533c9e83bfcf8a1c3e4804bfb662205e5f775ce20a002e2"}
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.303899 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-utilities\") pod \"redhat-operators-vxfw7\" (UID: \"22e4ba64-0a17-4ea7-8b9c-aa09d864be39\") " pod="openshift-marketplace/redhat-operators-vxfw7"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.303937 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-catalog-content\") pod \"redhat-operators-vxfw7\" (UID: \"22e4ba64-0a17-4ea7-8b9c-aa09d864be39\") " pod="openshift-marketplace/redhat-operators-vxfw7"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.304072 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqdqb\" (UniqueName: \"kubernetes.io/projected/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-kube-api-access-bqdqb\") pod \"redhat-operators-vxfw7\" (UID: \"22e4ba64-0a17-4ea7-8b9c-aa09d864be39\") " pod="openshift-marketplace/redhat-operators-vxfw7"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.308364 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4wpq4"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.343571 4877 patch_prober.go:28] interesting pod/downloads-7954f5f757-vpzx9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body=
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.343647 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-vpzx9" podUID="754cf791-541c-4944-bf3e-7ba18f44d8de" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.344081 4877 patch_prober.go:28] interesting pod/downloads-7954f5f757-vpzx9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body=
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.344110 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vpzx9" podUID="754cf791-541c-4944-bf3e-7ba18f44d8de" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.347808 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.405246 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-utilities\") pod \"redhat-operators-vxfw7\" (UID: \"22e4ba64-0a17-4ea7-8b9c-aa09d864be39\") " pod="openshift-marketplace/redhat-operators-vxfw7"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.405301 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-catalog-content\") pod \"redhat-operators-vxfw7\" (UID: \"22e4ba64-0a17-4ea7-8b9c-aa09d864be39\") " pod="openshift-marketplace/redhat-operators-vxfw7"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.405348 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqdqb\" (UniqueName: \"kubernetes.io/projected/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-kube-api-access-bqdqb\") pod \"redhat-operators-vxfw7\" (UID: \"22e4ba64-0a17-4ea7-8b9c-aa09d864be39\") " pod="openshift-marketplace/redhat-operators-vxfw7"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.407111 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-utilities\") pod \"redhat-operators-vxfw7\" (UID: \"22e4ba64-0a17-4ea7-8b9c-aa09d864be39\") " pod="openshift-marketplace/redhat-operators-vxfw7"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.407367 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-catalog-content\") pod \"redhat-operators-vxfw7\" (UID: \"22e4ba64-0a17-4ea7-8b9c-aa09d864be39\") " pod="openshift-marketplace/redhat-operators-vxfw7"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.408924 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-vbvr6"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.420227 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 16:37:29 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld
Jan 28 16:37:29 crc kubenswrapper[4877]: [+]process-running ok
Jan 28 16:37:29 crc kubenswrapper[4877]: healthz check failed
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.420311 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.437771 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqdqb\" (UniqueName: \"kubernetes.io/projected/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-kube-api-access-bqdqb\") pod \"redhat-operators-vxfw7\" (UID: \"22e4ba64-0a17-4ea7-8b9c-aa09d864be39\") " pod="openshift-marketplace/redhat-operators-vxfw7"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.564043 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vxfw7"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.639190 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fmnbl"]
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.641971 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fmnbl"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.662459 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fmnbl"]
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.706545 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.815325 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-config-volume\") pod \"587b4df1-7315-4a7f-b416-d2e3ff99fd0d\" (UID: \"587b4df1-7315-4a7f-b416-d2e3ff99fd0d\") "
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.815564 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-secret-volume\") pod \"587b4df1-7315-4a7f-b416-d2e3ff99fd0d\" (UID: \"587b4df1-7315-4a7f-b416-d2e3ff99fd0d\") "
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.815614 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smlnq\" (UniqueName: \"kubernetes.io/projected/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-kube-api-access-smlnq\") pod \"587b4df1-7315-4a7f-b416-d2e3ff99fd0d\" (UID: \"587b4df1-7315-4a7f-b416-d2e3ff99fd0d\") "
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.815865 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgcpj\" (UniqueName: \"kubernetes.io/projected/7fe92108-0e44-423d-b939-5ee8aec6c82f-kube-api-access-fgcpj\") pod \"redhat-operators-fmnbl\" (UID: \"7fe92108-0e44-423d-b939-5ee8aec6c82f\") " pod="openshift-marketplace/redhat-operators-fmnbl"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.815898 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fe92108-0e44-423d-b939-5ee8aec6c82f-catalog-content\") pod \"redhat-operators-fmnbl\" (UID: \"7fe92108-0e44-423d-b939-5ee8aec6c82f\") " pod="openshift-marketplace/redhat-operators-fmnbl"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.815963 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fe92108-0e44-423d-b939-5ee8aec6c82f-utilities\") pod \"redhat-operators-fmnbl\" (UID: \"7fe92108-0e44-423d-b939-5ee8aec6c82f\") " pod="openshift-marketplace/redhat-operators-fmnbl"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.817362 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-config-volume" (OuterVolumeSpecName: "config-volume") pod "587b4df1-7315-4a7f-b416-d2e3ff99fd0d" (UID: "587b4df1-7315-4a7f-b416-d2e3ff99fd0d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.824957 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "587b4df1-7315-4a7f-b416-d2e3ff99fd0d" (UID: "587b4df1-7315-4a7f-b416-d2e3ff99fd0d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.858721 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-kube-api-access-smlnq" (OuterVolumeSpecName: "kube-api-access-smlnq") pod "587b4df1-7315-4a7f-b416-d2e3ff99fd0d" (UID: "587b4df1-7315-4a7f-b416-d2e3ff99fd0d"). InnerVolumeSpecName "kube-api-access-smlnq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.916903 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgcpj\" (UniqueName: \"kubernetes.io/projected/7fe92108-0e44-423d-b939-5ee8aec6c82f-kube-api-access-fgcpj\") pod \"redhat-operators-fmnbl\" (UID: \"7fe92108-0e44-423d-b939-5ee8aec6c82f\") " pod="openshift-marketplace/redhat-operators-fmnbl"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.916974 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fe92108-0e44-423d-b939-5ee8aec6c82f-catalog-content\") pod \"redhat-operators-fmnbl\" (UID: \"7fe92108-0e44-423d-b939-5ee8aec6c82f\") " pod="openshift-marketplace/redhat-operators-fmnbl"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.917035 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fe92108-0e44-423d-b939-5ee8aec6c82f-utilities\") pod \"redhat-operators-fmnbl\" (UID: \"7fe92108-0e44-423d-b939-5ee8aec6c82f\") " pod="openshift-marketplace/redhat-operators-fmnbl"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.918810 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smlnq\" (UniqueName: \"kubernetes.io/projected/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-kube-api-access-smlnq\") on node \"crc\" DevicePath \"\""
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.919372 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fe92108-0e44-423d-b939-5ee8aec6c82f-catalog-content\") pod \"redhat-operators-fmnbl\" (UID: \"7fe92108-0e44-423d-b939-5ee8aec6c82f\") " pod="openshift-marketplace/redhat-operators-fmnbl"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.919622 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fe92108-0e44-423d-b939-5ee8aec6c82f-utilities\") pod \"redhat-operators-fmnbl\" (UID: \"7fe92108-0e44-423d-b939-5ee8aec6c82f\") " pod="openshift-marketplace/redhat-operators-fmnbl"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.920378 4877 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-config-volume\") on node \"crc\" DevicePath \"\""
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.920392 4877 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/587b4df1-7315-4a7f-b416-d2e3ff99fd0d-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.970749 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgcpj\" (UniqueName: \"kubernetes.io/projected/7fe92108-0e44-423d-b939-5ee8aec6c82f-kube-api-access-fgcpj\") pod \"redhat-operators-fmnbl\" (UID: \"7fe92108-0e44-423d-b939-5ee8aec6c82f\") " pod="openshift-marketplace/redhat-operators-fmnbl"
Jan 28 16:37:29 crc kubenswrapper[4877]: I0128 16:37:29.985464 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fmnbl"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.147881 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vxfw7"]
Jan 28 16:37:30 crc kubenswrapper[4877]: W0128 16:37:30.175838 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod22e4ba64_0a17_4ea7_8b9c_aa09d864be39.slice/crio-fbc5ab71c9848ba1993c519ca523147becb07cd173e896d994666e33d207887c WatchSource:0}: Error finding container fbc5ab71c9848ba1993c519ca523147becb07cd173e896d994666e33d207887c: Status 404 returned error can't find the container with id fbc5ab71c9848ba1993c519ca523147becb07cd173e896d994666e33d207887c
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.420837 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 16:37:30 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld
Jan 28 16:37:30 crc kubenswrapper[4877]: [+]process-running ok
Jan 28 16:37:30 crc kubenswrapper[4877]: healthz check failed
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.420932 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.440753 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" event={"ID":"3d4b7ce0-783a-44b4-9604-8ef0d398fec7","Type":"ContainerStarted","Data":"4178f21f5feffd2bd980f457e54e98da3e46ca5d15ccab9fd5925923ff4bb039"}
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.440950 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.444437 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vxfw7" event={"ID":"22e4ba64-0a17-4ea7-8b9c-aa09d864be39","Type":"ContainerStarted","Data":"fbc5ab71c9848ba1993c519ca523147becb07cd173e896d994666e33d207887c"}
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.449214 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564" event={"ID":"587b4df1-7315-4a7f-b416-d2e3ff99fd0d","Type":"ContainerDied","Data":"79be4f738d44f1a3c6bd8779b974ee354fea0a13df1ce72d84308f828f1781c3"}
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.449269 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="79be4f738d44f1a3c6bd8779b974ee354fea0a13df1ce72d84308f828f1781c3"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.449342 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.456553 4877 generic.go:334] "Generic (PLEG): container finished" podID="cad628ad-2502-408b-ab7a-4a5be2d1637f" containerID="9feffce29fbf8a73bbfee10e9cbee0563c9a9b3618acc6d08257c6283b2efd1a" exitCode=0
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.457618 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hx6kq" event={"ID":"cad628ad-2502-408b-ab7a-4a5be2d1637f","Type":"ContainerDied","Data":"9feffce29fbf8a73bbfee10e9cbee0563c9a9b3618acc6d08257c6283b2efd1a"}
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.499798 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" podStartSLOduration=129.499763021 podStartE2EDuration="2m9.499763021s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:30.465263299 +0000 UTC m=+154.023590197" watchObservedRunningTime="2026-01-28 16:37:30.499763021 +0000 UTC m=+154.058089909"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.528842 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fmnbl"]
Jan 28 16:37:30 crc kubenswrapper[4877]: W0128 16:37:30.540040 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7fe92108_0e44_423d_b939_5ee8aec6c82f.slice/crio-b04b600372cd3d7baf626d5dad5b7b1ef52f2ef1ba10ffe0502db5cc07cfea6f WatchSource:0}: Error finding container b04b600372cd3d7baf626d5dad5b7b1ef52f2ef1ba10ffe0502db5cc07cfea6f: Status 404 returned error can't find the container with id b04b600372cd3d7baf626d5dad5b7b1ef52f2ef1ba10ffe0502db5cc07cfea6f
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.543688 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 28 16:37:30 crc kubenswrapper[4877]: E0128 16:37:30.547505 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="587b4df1-7315-4a7f-b416-d2e3ff99fd0d" containerName="collect-profiles"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.547549 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="587b4df1-7315-4a7f-b416-d2e3ff99fd0d" containerName="collect-profiles"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.547791 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="587b4df1-7315-4a7f-b416-d2e3ff99fd0d" containerName="collect-profiles"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.548456 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.551079 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.556134 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.556230 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.737155 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f478db55-6d4d-4975-bc73-2ea2cce06bc4-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"f478db55-6d4d-4975-bc73-2ea2cce06bc4\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.738008 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f478db55-6d4d-4975-bc73-2ea2cce06bc4-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"f478db55-6d4d-4975-bc73-2ea2cce06bc4\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.784281 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.839318 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f478db55-6d4d-4975-bc73-2ea2cce06bc4-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"f478db55-6d4d-4975-bc73-2ea2cce06bc4\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.839391 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f478db55-6d4d-4975-bc73-2ea2cce06bc4-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"f478db55-6d4d-4975-bc73-2ea2cce06bc4\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.840049 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f478db55-6d4d-4975-bc73-2ea2cce06bc4-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"f478db55-6d4d-4975-bc73-2ea2cce06bc4\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.861960 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f478db55-6d4d-4975-bc73-2ea2cce06bc4-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"f478db55-6d4d-4975-bc73-2ea2cce06bc4\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.914749 4877 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.941022 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b20edd5d-4283-441b-9c08-7fe7d530e0af-kube-api-access\") pod \"b20edd5d-4283-441b-9c08-7fe7d530e0af\" (UID: \"b20edd5d-4283-441b-9c08-7fe7d530e0af\") " Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.941291 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b20edd5d-4283-441b-9c08-7fe7d530e0af-kubelet-dir\") pod \"b20edd5d-4283-441b-9c08-7fe7d530e0af\" (UID: \"b20edd5d-4283-441b-9c08-7fe7d530e0af\") " Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.941765 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b20edd5d-4283-441b-9c08-7fe7d530e0af-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "b20edd5d-4283-441b-9c08-7fe7d530e0af" (UID: "b20edd5d-4283-441b-9c08-7fe7d530e0af"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:37:30 crc kubenswrapper[4877]: I0128 16:37:30.950968 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b20edd5d-4283-441b-9c08-7fe7d530e0af-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "b20edd5d-4283-441b-9c08-7fe7d530e0af" (UID: "b20edd5d-4283-441b-9c08-7fe7d530e0af"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:37:31 crc kubenswrapper[4877]: I0128 16:37:31.043782 4877 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b20edd5d-4283-441b-9c08-7fe7d530e0af-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 28 16:37:31 crc kubenswrapper[4877]: I0128 16:37:31.043826 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b20edd5d-4283-441b-9c08-7fe7d530e0af-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 16:37:31 crc kubenswrapper[4877]: I0128 16:37:31.414655 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 16:37:31 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld Jan 28 16:37:31 crc kubenswrapper[4877]: [+]process-running ok Jan 28 16:37:31 crc kubenswrapper[4877]: healthz check failed Jan 28 16:37:31 crc kubenswrapper[4877]: I0128 16:37:31.415161 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 16:37:31 crc kubenswrapper[4877]: I0128 16:37:31.435897 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 28 16:37:31 crc kubenswrapper[4877]: I0128 16:37:31.467578 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"f478db55-6d4d-4975-bc73-2ea2cce06bc4","Type":"ContainerStarted","Data":"24081af958e7bb36fd646b996e4afa33a4d06586daf65326369ada5ae4ea3081"} Jan 28 16:37:31 crc 
kubenswrapper[4877]: I0128 16:37:31.470075 4877 generic.go:334] "Generic (PLEG): container finished" podID="22e4ba64-0a17-4ea7-8b9c-aa09d864be39" containerID="077892604d74eb280d0080cadc1c8db37de5076c4ff07ccee60097f874caa07f" exitCode=0 Jan 28 16:37:31 crc kubenswrapper[4877]: I0128 16:37:31.470163 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vxfw7" event={"ID":"22e4ba64-0a17-4ea7-8b9c-aa09d864be39","Type":"ContainerDied","Data":"077892604d74eb280d0080cadc1c8db37de5076c4ff07ccee60097f874caa07f"} Jan 28 16:37:31 crc kubenswrapper[4877]: I0128 16:37:31.475916 4877 generic.go:334] "Generic (PLEG): container finished" podID="7fe92108-0e44-423d-b939-5ee8aec6c82f" containerID="ee9b6c0b0a5a15d0385df9d67ae97f345cd351efb0435ba6f9a8f254e07aadc7" exitCode=0 Jan 28 16:37:31 crc kubenswrapper[4877]: I0128 16:37:31.476067 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fmnbl" event={"ID":"7fe92108-0e44-423d-b939-5ee8aec6c82f","Type":"ContainerDied","Data":"ee9b6c0b0a5a15d0385df9d67ae97f345cd351efb0435ba6f9a8f254e07aadc7"} Jan 28 16:37:31 crc kubenswrapper[4877]: I0128 16:37:31.476109 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fmnbl" event={"ID":"7fe92108-0e44-423d-b939-5ee8aec6c82f","Type":"ContainerStarted","Data":"b04b600372cd3d7baf626d5dad5b7b1ef52f2ef1ba10ffe0502db5cc07cfea6f"} Jan 28 16:37:31 crc kubenswrapper[4877]: I0128 16:37:31.483680 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"b20edd5d-4283-441b-9c08-7fe7d530e0af","Type":"ContainerDied","Data":"ceb91492f9060ecec975544d975e99bd9bbe38f8486a2ef8bb8693aafb33e907"} Jan 28 16:37:31 crc kubenswrapper[4877]: I0128 16:37:31.483746 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ceb91492f9060ecec975544d975e99bd9bbe38f8486a2ef8bb8693aafb33e907" Jan 28 16:37:31 crc kubenswrapper[4877]: I0128 16:37:31.483764 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 16:37:32 crc kubenswrapper[4877]: I0128 16:37:32.423833 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 16:37:32 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld Jan 28 16:37:32 crc kubenswrapper[4877]: [+]process-running ok Jan 28 16:37:32 crc kubenswrapper[4877]: healthz check failed Jan 28 16:37:32 crc kubenswrapper[4877]: I0128 16:37:32.424142 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 16:37:32 crc kubenswrapper[4877]: I0128 16:37:32.533334 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"f478db55-6d4d-4975-bc73-2ea2cce06bc4","Type":"ContainerStarted","Data":"41958ac5b9ffbc993c91e5f1358e3ebe841e9ecea9fbe095f78381d1e91d73b8"} Jan 28 16:37:32 crc kubenswrapper[4877]: I0128 16:37:32.577058 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.577025197 podStartE2EDuration="2.577025197s" podCreationTimestamp="2026-01-28 16:37:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:37:32.560702111 +0000 UTC m=+156.119029009" watchObservedRunningTime="2026-01-28 16:37:32.577025197 +0000 UTC m=+156.135352075" Jan 28 16:37:33 crc kubenswrapper[4877]: I0128 16:37:33.411185 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 16:37:33 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld Jan 28 16:37:33 crc kubenswrapper[4877]: [+]process-running ok Jan 28 16:37:33 crc kubenswrapper[4877]: healthz check failed Jan 28 16:37:33 crc kubenswrapper[4877]: I0128 16:37:33.411246 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 16:37:33 crc kubenswrapper[4877]: I0128 16:37:33.414376 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:33 crc kubenswrapper[4877]: I0128 16:37:33.430681 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-2m5lt" Jan 28 16:37:33 crc kubenswrapper[4877]: I0128 16:37:33.557849 4877 generic.go:334] "Generic (PLEG): container finished" podID="f478db55-6d4d-4975-bc73-2ea2cce06bc4" containerID="41958ac5b9ffbc993c91e5f1358e3ebe841e9ecea9fbe095f78381d1e91d73b8" exitCode=0 Jan 28 16:37:33 crc kubenswrapper[4877]: I0128 16:37:33.558096 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" 
event={"ID":"f478db55-6d4d-4975-bc73-2ea2cce06bc4","Type":"ContainerDied","Data":"41958ac5b9ffbc993c91e5f1358e3ebe841e9ecea9fbe095f78381d1e91d73b8"} Jan 28 16:37:33 crc kubenswrapper[4877]: I0128 16:37:33.880345 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-s295n" Jan 28 16:37:34 crc kubenswrapper[4877]: I0128 16:37:34.409904 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 16:37:34 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld Jan 28 16:37:34 crc kubenswrapper[4877]: [+]process-running ok Jan 28 16:37:34 crc kubenswrapper[4877]: healthz check failed Jan 28 16:37:34 crc kubenswrapper[4877]: I0128 16:37:34.409975 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 16:37:35 crc kubenswrapper[4877]: I0128 16:37:35.087202 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 16:37:35 crc kubenswrapper[4877]: I0128 16:37:35.267561 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f478db55-6d4d-4975-bc73-2ea2cce06bc4-kube-api-access\") pod \"f478db55-6d4d-4975-bc73-2ea2cce06bc4\" (UID: \"f478db55-6d4d-4975-bc73-2ea2cce06bc4\") " Jan 28 16:37:35 crc kubenswrapper[4877]: I0128 16:37:35.267734 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f478db55-6d4d-4975-bc73-2ea2cce06bc4-kubelet-dir\") pod \"f478db55-6d4d-4975-bc73-2ea2cce06bc4\" (UID: \"f478db55-6d4d-4975-bc73-2ea2cce06bc4\") " Jan 28 16:37:35 crc kubenswrapper[4877]: I0128 16:37:35.267852 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f478db55-6d4d-4975-bc73-2ea2cce06bc4-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "f478db55-6d4d-4975-bc73-2ea2cce06bc4" (UID: "f478db55-6d4d-4975-bc73-2ea2cce06bc4"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:37:35 crc kubenswrapper[4877]: I0128 16:37:35.268057 4877 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f478db55-6d4d-4975-bc73-2ea2cce06bc4-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 28 16:37:35 crc kubenswrapper[4877]: I0128 16:37:35.275162 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f478db55-6d4d-4975-bc73-2ea2cce06bc4-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "f478db55-6d4d-4975-bc73-2ea2cce06bc4" (UID: "f478db55-6d4d-4975-bc73-2ea2cce06bc4"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:37:35 crc kubenswrapper[4877]: I0128 16:37:35.369271 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f478db55-6d4d-4975-bc73-2ea2cce06bc4-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 16:37:35 crc kubenswrapper[4877]: I0128 16:37:35.411932 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 16:37:35 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld Jan 28 16:37:35 crc kubenswrapper[4877]: [+]process-running ok Jan 28 16:37:35 crc kubenswrapper[4877]: healthz check failed Jan 28 16:37:35 crc kubenswrapper[4877]: I0128 16:37:35.411997 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 16:37:35 crc kubenswrapper[4877]: I0128 16:37:35.580963 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"f478db55-6d4d-4975-bc73-2ea2cce06bc4","Type":"ContainerDied","Data":"24081af958e7bb36fd646b996e4afa33a4d06586daf65326369ada5ae4ea3081"} Jan 28 16:37:35 crc kubenswrapper[4877]: I0128 16:37:35.581013 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="24081af958e7bb36fd646b996e4afa33a4d06586daf65326369ada5ae4ea3081" Jan 28 16:37:35 crc kubenswrapper[4877]: I0128 16:37:35.581018 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 16:37:36 crc kubenswrapper[4877]: I0128 16:37:36.410808 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 16:37:36 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld Jan 28 16:37:36 crc kubenswrapper[4877]: [+]process-running ok Jan 28 16:37:36 crc kubenswrapper[4877]: healthz check failed Jan 28 16:37:36 crc kubenswrapper[4877]: I0128 16:37:36.411387 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 16:37:37 crc kubenswrapper[4877]: I0128 16:37:37.076442 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:37:37 crc kubenswrapper[4877]: I0128 16:37:37.076532 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:37:37 crc kubenswrapper[4877]: I0128 16:37:37.409734 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 16:37:37 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld Jan 28 16:37:37 crc kubenswrapper[4877]: [+]process-running ok Jan 28 16:37:37 crc kubenswrapper[4877]: healthz check failed Jan 28 16:37:37 crc kubenswrapper[4877]: I0128 16:37:37.409827 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 16:37:38 crc kubenswrapper[4877]: I0128 16:37:38.263716 4877 patch_prober.go:28] interesting pod/console-f9d7485db-f5npr container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.16:8443/health\": dial tcp 10.217.0.16:8443: connect: connection refused" start-of-body= Jan 28 16:37:38 crc kubenswrapper[4877]: I0128 16:37:38.264299 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-f5npr" podUID="ae95a71e-8f5b-45ac-b6e7-a78e2258de80" containerName="console" probeResult="failure" output="Get \"https://10.217.0.16:8443/health\": dial tcp 10.217.0.16:8443: connect: connection refused" Jan 28 16:37:38 crc kubenswrapper[4877]: I0128 16:37:38.409089 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 16:37:38 crc kubenswrapper[4877]: [-]has-synced failed: reason 
withheld Jan 28 16:37:38 crc kubenswrapper[4877]: [+]process-running ok Jan 28 16:37:38 crc kubenswrapper[4877]: healthz check failed Jan 28 16:37:38 crc kubenswrapper[4877]: I0128 16:37:38.409183 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 16:37:39 crc kubenswrapper[4877]: I0128 16:37:39.343893 4877 patch_prober.go:28] interesting pod/downloads-7954f5f757-vpzx9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Jan 28 16:37:39 crc kubenswrapper[4877]: I0128 16:37:39.343955 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-vpzx9" podUID="754cf791-541c-4944-bf3e-7ba18f44d8de" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Jan 28 16:37:39 crc kubenswrapper[4877]: I0128 16:37:39.345186 4877 patch_prober.go:28] interesting pod/downloads-7954f5f757-vpzx9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Jan 28 16:37:39 crc kubenswrapper[4877]: I0128 16:37:39.345265 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-vpzx9" podUID="754cf791-541c-4944-bf3e-7ba18f44d8de" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Jan 28 16:37:39 crc kubenswrapper[4877]: I0128 16:37:39.410830 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 16:37:39 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld Jan 28 16:37:39 crc kubenswrapper[4877]: [+]process-running ok Jan 28 16:37:39 crc kubenswrapper[4877]: healthz check failed Jan 28 16:37:39 crc kubenswrapper[4877]: I0128 16:37:39.410891 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 16:37:40 crc kubenswrapper[4877]: I0128 16:37:40.409921 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 16:37:40 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld Jan 28 16:37:40 crc kubenswrapper[4877]: [+]process-running ok Jan 28 16:37:40 crc kubenswrapper[4877]: healthz check failed Jan 28 16:37:40 crc kubenswrapper[4877]: I0128 16:37:40.410513 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 16:37:41 crc kubenswrapper[4877]: I0128 
16:37:41.409259 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 16:37:41 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld Jan 28 16:37:41 crc kubenswrapper[4877]: [+]process-running ok Jan 28 16:37:41 crc kubenswrapper[4877]: healthz check failed Jan 28 16:37:41 crc kubenswrapper[4877]: I0128 16:37:41.409366 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 16:37:42 crc kubenswrapper[4877]: I0128 16:37:42.363225 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:37:42 crc kubenswrapper[4877]: I0128 16:37:42.411498 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 16:37:42 crc kubenswrapper[4877]: [-]has-synced failed: reason withheld Jan 28 16:37:42 crc kubenswrapper[4877]: [+]process-running ok Jan 28 16:37:42 crc kubenswrapper[4877]: healthz check failed Jan 28 16:37:42 crc kubenswrapper[4877]: I0128 16:37:42.411594 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 16:37:43 crc kubenswrapper[4877]: I0128 16:37:43.409674 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:43 crc kubenswrapper[4877]: I0128 16:37:43.412937 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-vbvr6" Jan 28 16:37:43 crc kubenswrapper[4877]: I0128 16:37:43.707905 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs\") pod \"network-metrics-daemon-bh9bk\" (UID: \"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\") " pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:37:43 crc kubenswrapper[4877]: I0128 16:37:43.720139 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a6ea3417-5f04-4035-aaea-0dc5ad7d002d-metrics-certs\") pod \"network-metrics-daemon-bh9bk\" (UID: \"a6ea3417-5f04-4035-aaea-0dc5ad7d002d\") " pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:37:43 crc kubenswrapper[4877]: I0128 16:37:43.989399 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bh9bk" Jan 28 16:37:48 crc kubenswrapper[4877]: I0128 16:37:48.273375 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-f5npr" Jan 28 16:37:48 crc kubenswrapper[4877]: I0128 16:37:48.281967 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-f5npr" Jan 28 16:37:48 crc kubenswrapper[4877]: I0128 16:37:48.720414 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:37:49 crc kubenswrapper[4877]: I0128 16:37:49.358899 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-vpzx9" Jan 28 16:37:59 crc kubenswrapper[4877]: I0128 16:37:59.537337 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf" Jan 28 16:38:05 crc kubenswrapper[4877]: I0128 16:38:05.580752 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 16:38:06 crc kubenswrapper[4877]: I0128 16:38:06.333843 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 28 16:38:06 crc kubenswrapper[4877]: E0128 16:38:06.334665 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b20edd5d-4283-441b-9c08-7fe7d530e0af" containerName="pruner" Jan 28 16:38:06 crc kubenswrapper[4877]: I0128 16:38:06.334684 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="b20edd5d-4283-441b-9c08-7fe7d530e0af" containerName="pruner" Jan 28 16:38:06 crc kubenswrapper[4877]: E0128 16:38:06.334698 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f478db55-6d4d-4975-bc73-2ea2cce06bc4" containerName="pruner" Jan 28 16:38:06 crc kubenswrapper[4877]: I0128 16:38:06.334706 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f478db55-6d4d-4975-bc73-2ea2cce06bc4" containerName="pruner" Jan 28 16:38:06 crc kubenswrapper[4877]: I0128 16:38:06.334872 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="b20edd5d-4283-441b-9c08-7fe7d530e0af" containerName="pruner" Jan 28 16:38:06 crc kubenswrapper[4877]: I0128 16:38:06.334893 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f478db55-6d4d-4975-bc73-2ea2cce06bc4" containerName="pruner" Jan 28 16:38:06 crc kubenswrapper[4877]: I0128 16:38:06.335419 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 16:38:06 crc kubenswrapper[4877]: I0128 16:38:06.338152 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 28 16:38:06 crc kubenswrapper[4877]: I0128 16:38:06.338672 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 28 16:38:06 crc kubenswrapper[4877]: I0128 16:38:06.345572 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 28 16:38:06 crc kubenswrapper[4877]: I0128 16:38:06.392047 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/976b01e6-3ad6-48c4-b40b-58a38bad6294-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"976b01e6-3ad6-48c4-b40b-58a38bad6294\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 16:38:06 crc kubenswrapper[4877]: I0128 16:38:06.392266 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/976b01e6-3ad6-48c4-b40b-58a38bad6294-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"976b01e6-3ad6-48c4-b40b-58a38bad6294\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 16:38:06 crc kubenswrapper[4877]: I0128 16:38:06.493854 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/976b01e6-3ad6-48c4-b40b-58a38bad6294-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"976b01e6-3ad6-48c4-b40b-58a38bad6294\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 16:38:06 crc kubenswrapper[4877]: I0128 16:38:06.493954 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/976b01e6-3ad6-48c4-b40b-58a38bad6294-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"976b01e6-3ad6-48c4-b40b-58a38bad6294\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 16:38:06 crc kubenswrapper[4877]: I0128 16:38:06.494051 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/976b01e6-3ad6-48c4-b40b-58a38bad6294-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"976b01e6-3ad6-48c4-b40b-58a38bad6294\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 16:38:06 crc kubenswrapper[4877]: I0128 16:38:06.518984 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/976b01e6-3ad6-48c4-b40b-58a38bad6294-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"976b01e6-3ad6-48c4-b40b-58a38bad6294\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 16:38:06 crc kubenswrapper[4877]: I0128 16:38:06.663826 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 16:38:06 crc kubenswrapper[4877]: E0128 16:38:06.822584 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 28 16:38:06 crc kubenswrapper[4877]: E0128 16:38:06.822926 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cq772,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-sszxr_openshift-marketplace(8d139104-17f5-47de-a21d-08340d961df3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 16:38:06 crc kubenswrapper[4877]: E0128 16:38:06.824212 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-sszxr" podUID="8d139104-17f5-47de-a21d-08340d961df3" Jan 28 16:38:07 crc kubenswrapper[4877]: I0128 16:38:07.076669 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:38:07 crc kubenswrapper[4877]: I0128 16:38:07.077179 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:38:07 crc kubenswrapper[4877]: E0128 16:38:07.626025 4877 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-sszxr" podUID="8d139104-17f5-47de-a21d-08340d961df3" Jan 28 16:38:07 crc kubenswrapper[4877]: E0128 16:38:07.697735 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 28 16:38:07 crc kubenswrapper[4877]: E0128 16:38:07.697957 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7fm9m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-d2vhl_openshift-marketplace(4a7c4fb9-52e4-4736-9165-b793c332af0d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 16:38:07 crc kubenswrapper[4877]: E0128 16:38:07.701617 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-d2vhl" podUID="4a7c4fb9-52e4-4736-9165-b793c332af0d" Jan 28 16:38:11 crc kubenswrapper[4877]: I0128 16:38:11.330986 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 28 16:38:11 crc kubenswrapper[4877]: I0128 16:38:11.333267 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 16:38:11 crc kubenswrapper[4877]: I0128 16:38:11.353630 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 28 16:38:11 crc kubenswrapper[4877]: I0128 16:38:11.360095 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 16:38:11 crc kubenswrapper[4877]: I0128 16:38:11.360158 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-kube-api-access\") pod \"installer-9-crc\" (UID: \"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 16:38:11 crc kubenswrapper[4877]: I0128 16:38:11.360187 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-var-lock\") pod \"installer-9-crc\" (UID: \"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 16:38:11 crc kubenswrapper[4877]: I0128 16:38:11.461459 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 16:38:11 crc kubenswrapper[4877]: I0128 16:38:11.461577 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 16:38:11 crc kubenswrapper[4877]: I0128 16:38:11.461604 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-kube-api-access\") pod \"installer-9-crc\" (UID: \"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 16:38:11 crc kubenswrapper[4877]: I0128 16:38:11.461647 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-var-lock\") pod \"installer-9-crc\" (UID: \"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 16:38:11 crc kubenswrapper[4877]: I0128 16:38:11.461808 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-var-lock\") pod \"installer-9-crc\" (UID: \"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 16:38:11 crc kubenswrapper[4877]: I0128 16:38:11.493970 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-kube-api-access\") pod \"installer-9-crc\" (UID: 
\"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 16:38:11 crc kubenswrapper[4877]: I0128 16:38:11.657267 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 16:38:11 crc kubenswrapper[4877]: E0128 16:38:11.772717 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-d2vhl" podUID="4a7c4fb9-52e4-4736-9165-b793c332af0d" Jan 28 16:38:11 crc kubenswrapper[4877]: E0128 16:38:11.868888 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 28 16:38:11 crc kubenswrapper[4877]: E0128 16:38:11.869558 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fgcpj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-fmnbl_openshift-marketplace(7fe92108-0e44-423d-b939-5ee8aec6c82f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 16:38:11 crc kubenswrapper[4877]: E0128 16:38:11.870763 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-fmnbl" podUID="7fe92108-0e44-423d-b939-5ee8aec6c82f" Jan 28 16:38:11 crc kubenswrapper[4877]: E0128 16:38:11.890210 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" 
image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 28 16:38:11 crc kubenswrapper[4877]: E0128 16:38:11.890404 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bqdqb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-vxfw7_openshift-marketplace(22e4ba64-0a17-4ea7-8b9c-aa09d864be39): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 16:38:11 crc kubenswrapper[4877]: E0128 16:38:11.891706 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-vxfw7" podUID="22e4ba64-0a17-4ea7-8b9c-aa09d864be39" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.353645 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-fmnbl" podUID="7fe92108-0e44-423d-b939-5ee8aec6c82f" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.354639 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-vxfw7" podUID="22e4ba64-0a17-4ea7-8b9c-aa09d864be39" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.443392 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.444361 4877 kuberuntime_manager.go:1274] 
"Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jqc5k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-2m2k4_openshift-marketplace(f76979dc-a93e-496f-b9ac-e3f0710c2899): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.446767 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-2m2k4" podUID="f76979dc-a93e-496f-b9ac-e3f0710c2899" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.446779 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.447072 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kvclq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-prvss_openshift-marketplace(fa400ca9-c7cc-482b-af01-6743a80710fe): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.448263 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-prvss" podUID="fa400ca9-c7cc-482b-af01-6743a80710fe" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.462525 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.462729 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6tth6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-hx6kq_openshift-marketplace(cad628ad-2502-408b-ab7a-4a5be2d1637f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.464659 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-hx6kq" podUID="cad628ad-2502-408b-ab7a-4a5be2d1637f" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.514688 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.515020 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-79wvd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-q5lsk_openshift-marketplace(db5392ea-c535-46b8-80cf-7fc0b43bf1de): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.516704 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-q5lsk" podUID="db5392ea-c535-46b8-80cf-7fc0b43bf1de" Jan 28 16:38:13 crc kubenswrapper[4877]: I0128 16:38:13.846535 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-bh9bk"] Jan 28 16:38:13 crc kubenswrapper[4877]: I0128 16:38:13.881505 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" event={"ID":"a6ea3417-5f04-4035-aaea-0dc5ad7d002d","Type":"ContainerStarted","Data":"fb897e6c9c5711a9e46d1eddbce6d387e2326fed7fce2bc55d1bbee8a38825be"} Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.883435 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-2m2k4" podUID="f76979dc-a93e-496f-b9ac-e3f0710c2899" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.883834 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-q5lsk" podUID="db5392ea-c535-46b8-80cf-7fc0b43bf1de" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.883876 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" 
pod="openshift-marketplace/community-operators-prvss" podUID="fa400ca9-c7cc-482b-af01-6743a80710fe" Jan 28 16:38:13 crc kubenswrapper[4877]: E0128 16:38:13.885969 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-hx6kq" podUID="cad628ad-2502-408b-ab7a-4a5be2d1637f" Jan 28 16:38:13 crc kubenswrapper[4877]: I0128 16:38:13.923563 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 28 16:38:13 crc kubenswrapper[4877]: I0128 16:38:13.926253 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 28 16:38:13 crc kubenswrapper[4877]: W0128 16:38:13.936578 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod3a2b17ce_acb8_4c3d_830c_5fb8b521a5d8.slice/crio-ec2e5aee7dd31eea87085cefdba6781544120c7d23bdeb901a6640bd694cf655 WatchSource:0}: Error finding container ec2e5aee7dd31eea87085cefdba6781544120c7d23bdeb901a6640bd694cf655: Status 404 returned error can't find the container with id ec2e5aee7dd31eea87085cefdba6781544120c7d23bdeb901a6640bd694cf655 Jan 28 16:38:14 crc kubenswrapper[4877]: I0128 16:38:14.888901 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"976b01e6-3ad6-48c4-b40b-58a38bad6294","Type":"ContainerStarted","Data":"9b1c576090804b1594a4e7802501c002819ffb58a08897cf83b3f38df34b00bf"} Jan 28 16:38:14 crc kubenswrapper[4877]: I0128 16:38:14.889382 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"976b01e6-3ad6-48c4-b40b-58a38bad6294","Type":"ContainerStarted","Data":"6e7d15cf0878588d58011b84819ac52ed2d879cb73070017c082369a1995eca3"} Jan 28 16:38:14 crc kubenswrapper[4877]: I0128 16:38:14.890847 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" event={"ID":"a6ea3417-5f04-4035-aaea-0dc5ad7d002d","Type":"ContainerStarted","Data":"59edf28af27a503a0d95387c254efe7ff2d6dc98d5fc213264ca44af1c5ce28d"} Jan 28 16:38:14 crc kubenswrapper[4877]: I0128 16:38:14.890917 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-bh9bk" event={"ID":"a6ea3417-5f04-4035-aaea-0dc5ad7d002d","Type":"ContainerStarted","Data":"228018ee0871b9fb4af7b8cfc6f10615031de1e08f53c401e348b0e427a97c89"} Jan 28 16:38:14 crc kubenswrapper[4877]: I0128 16:38:14.893621 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8","Type":"ContainerStarted","Data":"77bf4f3b61720e0cd0aa6a280bb82c95062a79f56e8ce48c8dbd85e60364a814"} Jan 28 16:38:14 crc kubenswrapper[4877]: I0128 16:38:14.893658 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8","Type":"ContainerStarted","Data":"ec2e5aee7dd31eea87085cefdba6781544120c7d23bdeb901a6640bd694cf655"} Jan 28 16:38:14 crc kubenswrapper[4877]: I0128 16:38:14.925894 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=8.925865578 podStartE2EDuration="8.925865578s" podCreationTimestamp="2026-01-28 
16:38:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:38:14.905781973 +0000 UTC m=+198.464108861" watchObservedRunningTime="2026-01-28 16:38:14.925865578 +0000 UTC m=+198.484192466" Jan 28 16:38:14 crc kubenswrapper[4877]: I0128 16:38:14.929404 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=3.929390154 podStartE2EDuration="3.929390154s" podCreationTimestamp="2026-01-28 16:38:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:38:14.925804416 +0000 UTC m=+198.484131304" watchObservedRunningTime="2026-01-28 16:38:14.929390154 +0000 UTC m=+198.487717042" Jan 28 16:38:14 crc kubenswrapper[4877]: I0128 16:38:14.946345 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-bh9bk" podStartSLOduration=173.946318334 podStartE2EDuration="2m53.946318334s" podCreationTimestamp="2026-01-28 16:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:38:14.941645793 +0000 UTC m=+198.499972701" watchObservedRunningTime="2026-01-28 16:38:14.946318334 +0000 UTC m=+198.504645222" Jan 28 16:38:15 crc kubenswrapper[4877]: I0128 16:38:15.180356 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-whfj4"] Jan 28 16:38:15 crc kubenswrapper[4877]: I0128 16:38:15.908073 4877 generic.go:334] "Generic (PLEG): container finished" podID="976b01e6-3ad6-48c4-b40b-58a38bad6294" containerID="9b1c576090804b1594a4e7802501c002819ffb58a08897cf83b3f38df34b00bf" exitCode=0 Jan 28 16:38:15 crc kubenswrapper[4877]: I0128 16:38:15.908160 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"976b01e6-3ad6-48c4-b40b-58a38bad6294","Type":"ContainerDied","Data":"9b1c576090804b1594a4e7802501c002819ffb58a08897cf83b3f38df34b00bf"} Jan 28 16:38:17 crc kubenswrapper[4877]: I0128 16:38:17.294921 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 16:38:17 crc kubenswrapper[4877]: I0128 16:38:17.412280 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/976b01e6-3ad6-48c4-b40b-58a38bad6294-kubelet-dir\") pod \"976b01e6-3ad6-48c4-b40b-58a38bad6294\" (UID: \"976b01e6-3ad6-48c4-b40b-58a38bad6294\") " Jan 28 16:38:17 crc kubenswrapper[4877]: I0128 16:38:17.412780 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/976b01e6-3ad6-48c4-b40b-58a38bad6294-kube-api-access\") pod \"976b01e6-3ad6-48c4-b40b-58a38bad6294\" (UID: \"976b01e6-3ad6-48c4-b40b-58a38bad6294\") " Jan 28 16:38:17 crc kubenswrapper[4877]: I0128 16:38:17.412452 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/976b01e6-3ad6-48c4-b40b-58a38bad6294-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "976b01e6-3ad6-48c4-b40b-58a38bad6294" (UID: "976b01e6-3ad6-48c4-b40b-58a38bad6294"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:38:17 crc kubenswrapper[4877]: I0128 16:38:17.413348 4877 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/976b01e6-3ad6-48c4-b40b-58a38bad6294-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 28 16:38:17 crc kubenswrapper[4877]: I0128 16:38:17.419693 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/976b01e6-3ad6-48c4-b40b-58a38bad6294-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "976b01e6-3ad6-48c4-b40b-58a38bad6294" (UID: "976b01e6-3ad6-48c4-b40b-58a38bad6294"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:38:17 crc kubenswrapper[4877]: I0128 16:38:17.514957 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/976b01e6-3ad6-48c4-b40b-58a38bad6294-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 16:38:17 crc kubenswrapper[4877]: I0128 16:38:17.928434 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"976b01e6-3ad6-48c4-b40b-58a38bad6294","Type":"ContainerDied","Data":"6e7d15cf0878588d58011b84819ac52ed2d879cb73070017c082369a1995eca3"} Jan 28 16:38:17 crc kubenswrapper[4877]: I0128 16:38:17.928543 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6e7d15cf0878588d58011b84819ac52ed2d879cb73070017c082369a1995eca3" Jan 28 16:38:17 crc kubenswrapper[4877]: I0128 16:38:17.929002 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 16:38:22 crc kubenswrapper[4877]: I0128 16:38:22.958451 4877 generic.go:334] "Generic (PLEG): container finished" podID="8d139104-17f5-47de-a21d-08340d961df3" containerID="39c1b4974ec248afa76605554d29a03c790d1d868a2ec3c008b621b8391e57bb" exitCode=0 Jan 28 16:38:22 crc kubenswrapper[4877]: I0128 16:38:22.958538 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sszxr" event={"ID":"8d139104-17f5-47de-a21d-08340d961df3","Type":"ContainerDied","Data":"39c1b4974ec248afa76605554d29a03c790d1d868a2ec3c008b621b8391e57bb"} Jan 28 16:38:23 crc kubenswrapper[4877]: I0128 16:38:23.968983 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sszxr" event={"ID":"8d139104-17f5-47de-a21d-08340d961df3","Type":"ContainerStarted","Data":"94c34b88600d9843d390c37a3ea3fcc00126bcf6131176e29b76b64f2f852a37"} Jan 28 16:38:23 crc kubenswrapper[4877]: I0128 16:38:23.992086 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sszxr" podStartSLOduration=2.780074647 podStartE2EDuration="57.992063615s" podCreationTimestamp="2026-01-28 16:37:26 +0000 UTC" firstStartedPulling="2026-01-28 16:37:28.172260933 +0000 UTC m=+151.730587821" lastFinishedPulling="2026-01-28 16:38:23.384249901 +0000 UTC m=+206.942576789" observedRunningTime="2026-01-28 16:38:23.988121636 +0000 UTC m=+207.546448554" watchObservedRunningTime="2026-01-28 16:38:23.992063615 +0000 UTC m=+207.550390503" Jan 28 16:38:26 crc kubenswrapper[4877]: I0128 16:38:26.789048 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:38:26 crc kubenswrapper[4877]: I0128 
16:38:26.789503 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:38:26 crc kubenswrapper[4877]: I0128 16:38:26.933797 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:38:26 crc kubenswrapper[4877]: I0128 16:38:26.988703 4877 generic.go:334] "Generic (PLEG): container finished" podID="db5392ea-c535-46b8-80cf-7fc0b43bf1de" containerID="c170df2693cb5aa452c76e4bc7e468dcd49375ee68f4810e86db025cccb49bcd" exitCode=0 Jan 28 16:38:26 crc kubenswrapper[4877]: I0128 16:38:26.988777 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5lsk" event={"ID":"db5392ea-c535-46b8-80cf-7fc0b43bf1de","Type":"ContainerDied","Data":"c170df2693cb5aa452c76e4bc7e468dcd49375ee68f4810e86db025cccb49bcd"} Jan 28 16:38:26 crc kubenswrapper[4877]: I0128 16:38:26.991355 4877 generic.go:334] "Generic (PLEG): container finished" podID="fa400ca9-c7cc-482b-af01-6743a80710fe" containerID="2fd6821929b6767f9c79983614831353ae16da66cf26693824c827ec34970f27" exitCode=0 Jan 28 16:38:26 crc kubenswrapper[4877]: I0128 16:38:26.991490 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-prvss" event={"ID":"fa400ca9-c7cc-482b-af01-6743a80710fe","Type":"ContainerDied","Data":"2fd6821929b6767f9c79983614831353ae16da66cf26693824c827ec34970f27"} Jan 28 16:38:26 crc kubenswrapper[4877]: I0128 16:38:26.995543 4877 generic.go:334] "Generic (PLEG): container finished" podID="7fe92108-0e44-423d-b939-5ee8aec6c82f" containerID="9d8fcae14d114abc6e3f949b7e8f030b9128e3f0df1f886a50fe27e73a9ac6d4" exitCode=0 Jan 28 16:38:26 crc kubenswrapper[4877]: I0128 16:38:26.995615 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fmnbl" event={"ID":"7fe92108-0e44-423d-b939-5ee8aec6c82f","Type":"ContainerDied","Data":"9d8fcae14d114abc6e3f949b7e8f030b9128e3f0df1f886a50fe27e73a9ac6d4"} Jan 28 16:38:28 crc kubenswrapper[4877]: I0128 16:38:28.008291 4877 generic.go:334] "Generic (PLEG): container finished" podID="f76979dc-a93e-496f-b9ac-e3f0710c2899" containerID="faa6e19ac7ee9006302eb4804be0c56a62713a8bc42ca3b952298f903dd29f3c" exitCode=0 Jan 28 16:38:28 crc kubenswrapper[4877]: I0128 16:38:28.008373 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2m2k4" event={"ID":"f76979dc-a93e-496f-b9ac-e3f0710c2899","Type":"ContainerDied","Data":"faa6e19ac7ee9006302eb4804be0c56a62713a8bc42ca3b952298f903dd29f3c"} Jan 28 16:38:29 crc kubenswrapper[4877]: I0128 16:38:29.019091 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fmnbl" event={"ID":"7fe92108-0e44-423d-b939-5ee8aec6c82f","Type":"ContainerStarted","Data":"a1c3364a5e1bbce62bacbbbf8815d8d59bd343e844abf411a3169c77b823ce5b"} Jan 28 16:38:29 crc kubenswrapper[4877]: I0128 16:38:29.022156 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5lsk" event={"ID":"db5392ea-c535-46b8-80cf-7fc0b43bf1de","Type":"ContainerStarted","Data":"b2f4e60aa3a730bcfc985dbbf261241c1ad5887fe251a84d2ca3937582bfbcd1"} Jan 28 16:38:29 crc kubenswrapper[4877]: I0128 16:38:29.024631 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-prvss" 
event={"ID":"fa400ca9-c7cc-482b-af01-6743a80710fe","Type":"ContainerStarted","Data":"8291659040b753d9c1d2bf6a93edce02f963d622dafd441592a70f78316bea14"} Jan 28 16:38:29 crc kubenswrapper[4877]: I0128 16:38:29.079231 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fmnbl" podStartSLOduration=4.156521843 podStartE2EDuration="1m0.079212243s" podCreationTimestamp="2026-01-28 16:37:29 +0000 UTC" firstStartedPulling="2026-01-28 16:37:31.494618285 +0000 UTC m=+155.052945173" lastFinishedPulling="2026-01-28 16:38:27.417308685 +0000 UTC m=+210.975635573" observedRunningTime="2026-01-28 16:38:29.050317723 +0000 UTC m=+212.608644611" watchObservedRunningTime="2026-01-28 16:38:29.079212243 +0000 UTC m=+212.637539131" Jan 28 16:38:29 crc kubenswrapper[4877]: I0128 16:38:29.080327 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-prvss" podStartSLOduration=3.8322260630000002 podStartE2EDuration="1m3.080321507s" podCreationTimestamp="2026-01-28 16:37:26 +0000 UTC" firstStartedPulling="2026-01-28 16:37:28.201583806 +0000 UTC m=+151.759910684" lastFinishedPulling="2026-01-28 16:38:27.44967925 +0000 UTC m=+211.008006128" observedRunningTime="2026-01-28 16:38:29.078331387 +0000 UTC m=+212.636658275" watchObservedRunningTime="2026-01-28 16:38:29.080321507 +0000 UTC m=+212.638648395" Jan 28 16:38:29 crc kubenswrapper[4877]: I0128 16:38:29.096298 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-q5lsk" podStartSLOduration=4.914823733 podStartE2EDuration="1m3.096276587s" podCreationTimestamp="2026-01-28 16:37:26 +0000 UTC" firstStartedPulling="2026-01-28 16:37:29.292009982 +0000 UTC m=+152.850336890" lastFinishedPulling="2026-01-28 16:38:27.473462856 +0000 UTC m=+211.031789744" observedRunningTime="2026-01-28 16:38:29.094740481 +0000 UTC m=+212.653067369" watchObservedRunningTime="2026-01-28 16:38:29.096276587 +0000 UTC m=+212.654603475" Jan 28 16:38:29 crc kubenswrapper[4877]: I0128 16:38:29.986163 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fmnbl" Jan 28 16:38:29 crc kubenswrapper[4877]: I0128 16:38:29.986245 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fmnbl" Jan 28 16:38:31 crc kubenswrapper[4877]: I0128 16:38:31.035550 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fmnbl" podUID="7fe92108-0e44-423d-b939-5ee8aec6c82f" containerName="registry-server" probeResult="failure" output=< Jan 28 16:38:31 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 16:38:31 crc kubenswrapper[4877]: > Jan 28 16:38:36 crc kubenswrapper[4877]: I0128 16:38:36.490150 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-prvss" Jan 28 16:38:36 crc kubenswrapper[4877]: I0128 16:38:36.490712 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-prvss" Jan 28 16:38:36 crc kubenswrapper[4877]: I0128 16:38:36.555427 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-prvss" Jan 28 16:38:36 crc kubenswrapper[4877]: I0128 16:38:36.838120 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:38:37 crc kubenswrapper[4877]: I0128 16:38:37.077003 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:38:37 crc kubenswrapper[4877]: I0128 16:38:37.077549 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:38:37 crc kubenswrapper[4877]: I0128 16:38:37.077803 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:38:37 crc kubenswrapper[4877]: I0128 16:38:37.078729 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 16:38:37 crc kubenswrapper[4877]: I0128 16:38:37.079110 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" containerID="cri-o://16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb" gracePeriod=600 Jan 28 16:38:37 crc kubenswrapper[4877]: I0128 16:38:37.146692 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-prvss" Jan 28 16:38:37 crc kubenswrapper[4877]: I0128 16:38:37.151829 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-q5lsk" Jan 28 16:38:37 crc kubenswrapper[4877]: I0128 16:38:37.152165 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-q5lsk" Jan 28 16:38:37 crc kubenswrapper[4877]: I0128 16:38:37.231307 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-q5lsk" Jan 28 16:38:38 crc kubenswrapper[4877]: I0128 16:38:38.085465 4877 generic.go:334] "Generic (PLEG): container finished" podID="22e4ba64-0a17-4ea7-8b9c-aa09d864be39" containerID="3153a3e344d9547ece00c827f28c46ad0fc5c55b3aca0fa5f4bc3c080c7f4330" exitCode=0 Jan 28 16:38:38 crc kubenswrapper[4877]: I0128 16:38:38.085579 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vxfw7" event={"ID":"22e4ba64-0a17-4ea7-8b9c-aa09d864be39","Type":"ContainerDied","Data":"3153a3e344d9547ece00c827f28c46ad0fc5c55b3aca0fa5f4bc3c080c7f4330"} Jan 28 16:38:38 crc kubenswrapper[4877]: I0128 16:38:38.088940 4877 generic.go:334] "Generic (PLEG): container finished" podID="4a7c4fb9-52e4-4736-9165-b793c332af0d" containerID="45fce4c53b20b248ab842b9f0b77528ef18fc78c2e58f41ebbb379b6625b3663" exitCode=0 Jan 28 16:38:38 crc kubenswrapper[4877]: I0128 16:38:38.089034 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-d2vhl" event={"ID":"4a7c4fb9-52e4-4736-9165-b793c332af0d","Type":"ContainerDied","Data":"45fce4c53b20b248ab842b9f0b77528ef18fc78c2e58f41ebbb379b6625b3663"} Jan 28 16:38:38 crc kubenswrapper[4877]: I0128 16:38:38.094539 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb" exitCode=0 Jan 28 16:38:38 crc kubenswrapper[4877]: I0128 16:38:38.094693 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb"} Jan 28 16:38:38 crc kubenswrapper[4877]: I0128 16:38:38.094756 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"163e625204e85fa60aefc636260cc789258eff00206d927c91d05b2e7e892ef9"} Jan 28 16:38:38 crc kubenswrapper[4877]: I0128 16:38:38.098089 4877 generic.go:334] "Generic (PLEG): container finished" podID="cad628ad-2502-408b-ab7a-4a5be2d1637f" containerID="9d6a2ce44e8a3d3efc557ea24ba53c1ee1124d4e43d2122598cee4aeeaf6736b" exitCode=0 Jan 28 16:38:38 crc kubenswrapper[4877]: I0128 16:38:38.098134 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hx6kq" event={"ID":"cad628ad-2502-408b-ab7a-4a5be2d1637f","Type":"ContainerDied","Data":"9d6a2ce44e8a3d3efc557ea24ba53c1ee1124d4e43d2122598cee4aeeaf6736b"} Jan 28 16:38:38 crc kubenswrapper[4877]: I0128 16:38:38.104261 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2m2k4" event={"ID":"f76979dc-a93e-496f-b9ac-e3f0710c2899","Type":"ContainerStarted","Data":"f9d869b40b5b083671d1b92117a47f84e4495833722f704197246d0379afb876"} Jan 28 16:38:38 crc kubenswrapper[4877]: I0128 16:38:38.169750 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-q5lsk" Jan 28 16:38:38 crc kubenswrapper[4877]: I0128 16:38:38.190787 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2m2k4" podStartSLOduration=4.539979611 podStartE2EDuration="1m12.190761495s" podCreationTimestamp="2026-01-28 16:37:26 +0000 UTC" firstStartedPulling="2026-01-28 16:37:29.25185577 +0000 UTC m=+152.810182648" lastFinishedPulling="2026-01-28 16:38:36.902637644 +0000 UTC m=+220.460964532" observedRunningTime="2026-01-28 16:38:38.190166376 +0000 UTC m=+221.748493264" watchObservedRunningTime="2026-01-28 16:38:38.190761495 +0000 UTC m=+221.749088383" Jan 28 16:38:39 crc kubenswrapper[4877]: I0128 16:38:39.395003 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q5lsk"] Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.027587 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fmnbl" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.071254 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fmnbl" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.118034 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-vxfw7" event={"ID":"22e4ba64-0a17-4ea7-8b9c-aa09d864be39","Type":"ContainerStarted","Data":"d17573daeb69e8294ee5e32d3d660477680d91b510f48fa49c0a1772ed1c4fa8"} Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.120769 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2vhl" event={"ID":"4a7c4fb9-52e4-4736-9165-b793c332af0d","Type":"ContainerStarted","Data":"278c64682b2e1bc0ee33815f5dfc2dfc0050ab186b9c1a381b751eb690f21fab"} Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.122542 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hx6kq" event={"ID":"cad628ad-2502-408b-ab7a-4a5be2d1637f","Type":"ContainerStarted","Data":"07d4bb41297d9db30f69d5e358773aa5c35c6eab7dd4240deb4ecf4f9918e6db"} Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.145438 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vxfw7" podStartSLOduration=2.957197244 podStartE2EDuration="1m11.145416979s" podCreationTimestamp="2026-01-28 16:37:29 +0000 UTC" firstStartedPulling="2026-01-28 16:37:31.492077177 +0000 UTC m=+155.050404065" lastFinishedPulling="2026-01-28 16:38:39.680296912 +0000 UTC m=+223.238623800" observedRunningTime="2026-01-28 16:38:40.139079808 +0000 UTC m=+223.697406716" watchObservedRunningTime="2026-01-28 16:38:40.145416979 +0000 UTC m=+223.703743867" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.164175 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hx6kq" podStartSLOduration=3.241242868 podStartE2EDuration="1m12.164143123s" podCreationTimestamp="2026-01-28 16:37:28 +0000 UTC" firstStartedPulling="2026-01-28 16:37:30.459350211 +0000 UTC m=+154.017677099" lastFinishedPulling="2026-01-28 16:38:39.382250466 +0000 UTC m=+222.940577354" observedRunningTime="2026-01-28 16:38:40.156666508 +0000 UTC m=+223.714993386" watchObservedRunningTime="2026-01-28 16:38:40.164143123 +0000 UTC m=+223.722470011" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.185063 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-d2vhl" podStartSLOduration=1.99123466 podStartE2EDuration="1m12.185035242s" podCreationTimestamp="2026-01-28 16:37:28 +0000 UTC" firstStartedPulling="2026-01-28 16:37:29.300303243 +0000 UTC m=+152.858630131" lastFinishedPulling="2026-01-28 16:38:39.494103825 +0000 UTC m=+223.052430713" observedRunningTime="2026-01-28 16:38:40.18331529 +0000 UTC m=+223.741642178" watchObservedRunningTime="2026-01-28 16:38:40.185035242 +0000 UTC m=+223.743362130" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.214698 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-whfj4" podUID="01fc6775-f774-41c4-872e-dba5e6d80e10" containerName="oauth-openshift" containerID="cri-o://fdcaaf42922292a2614c4426936654a6ff76d10141f1260c301837ef9c3f37d2" gracePeriod=15 Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.662345 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-whfj4" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.695781 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-7448d7568b-r4ttp"] Jan 28 16:38:40 crc kubenswrapper[4877]: E0128 16:38:40.696043 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="976b01e6-3ad6-48c4-b40b-58a38bad6294" containerName="pruner" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.696057 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="976b01e6-3ad6-48c4-b40b-58a38bad6294" containerName="pruner" Jan 28 16:38:40 crc kubenswrapper[4877]: E0128 16:38:40.696066 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01fc6775-f774-41c4-872e-dba5e6d80e10" containerName="oauth-openshift" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.696073 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="01fc6775-f774-41c4-872e-dba5e6d80e10" containerName="oauth-openshift" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.696186 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="976b01e6-3ad6-48c4-b40b-58a38bad6294" containerName="pruner" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.696200 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="01fc6775-f774-41c4-872e-dba5e6d80e10" containerName="oauth-openshift" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.696653 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.723022 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7448d7568b-r4ttp"] Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.779638 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-trusted-ca-bundle\") pod \"01fc6775-f774-41c4-872e-dba5e6d80e10\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.779719 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b6749\" (UniqueName: \"kubernetes.io/projected/01fc6775-f774-41c4-872e-dba5e6d80e10-kube-api-access-b6749\") pod \"01fc6775-f774-41c4-872e-dba5e6d80e10\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.779753 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-service-ca\") pod \"01fc6775-f774-41c4-872e-dba5e6d80e10\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.779776 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-idp-0-file-data\") pod \"01fc6775-f774-41c4-872e-dba5e6d80e10\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.779857 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" 
(UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-error\") pod \"01fc6775-f774-41c4-872e-dba5e6d80e10\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.779893 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-audit-policies\") pod \"01fc6775-f774-41c4-872e-dba5e6d80e10\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.779950 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/01fc6775-f774-41c4-872e-dba5e6d80e10-audit-dir\") pod \"01fc6775-f774-41c4-872e-dba5e6d80e10\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.779977 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-login\") pod \"01fc6775-f774-41c4-872e-dba5e6d80e10\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.780009 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-session\") pod \"01fc6775-f774-41c4-872e-dba5e6d80e10\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.780036 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-cliconfig\") pod \"01fc6775-f774-41c4-872e-dba5e6d80e10\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.780059 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-serving-cert\") pod \"01fc6775-f774-41c4-872e-dba5e6d80e10\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.780081 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-ocp-branding-template\") pod \"01fc6775-f774-41c4-872e-dba5e6d80e10\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.780105 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-provider-selection\") pod \"01fc6775-f774-41c4-872e-dba5e6d80e10\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.780125 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-router-certs\") pod 
\"01fc6775-f774-41c4-872e-dba5e6d80e10\" (UID: \"01fc6775-f774-41c4-872e-dba5e6d80e10\") " Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.780304 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-user-template-login\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.780343 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlz8j\" (UniqueName: \"kubernetes.io/projected/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-kube-api-access-jlz8j\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.780379 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.781286 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "01fc6775-f774-41c4-872e-dba5e6d80e10" (UID: "01fc6775-f774-41c4-872e-dba5e6d80e10"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.782138 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.782172 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-audit-dir\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.782201 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.782229 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.782253 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-service-ca\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.782271 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.782292 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-session\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.782314 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.782332 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-user-template-error\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.782364 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-audit-policies\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.782407 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-router-certs\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.782465 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.782665 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/01fc6775-f774-41c4-872e-dba5e6d80e10-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "01fc6775-f774-41c4-872e-dba5e6d80e10" (UID: "01fc6775-f774-41c4-872e-dba5e6d80e10"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.782772 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "01fc6775-f774-41c4-872e-dba5e6d80e10" (UID: "01fc6775-f774-41c4-872e-dba5e6d80e10"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.783231 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "01fc6775-f774-41c4-872e-dba5e6d80e10" (UID: "01fc6775-f774-41c4-872e-dba5e6d80e10"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.783798 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "01fc6775-f774-41c4-872e-dba5e6d80e10" (UID: "01fc6775-f774-41c4-872e-dba5e6d80e10"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.806911 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "01fc6775-f774-41c4-872e-dba5e6d80e10" (UID: "01fc6775-f774-41c4-872e-dba5e6d80e10"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.807042 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01fc6775-f774-41c4-872e-dba5e6d80e10-kube-api-access-b6749" (OuterVolumeSpecName: "kube-api-access-b6749") pod "01fc6775-f774-41c4-872e-dba5e6d80e10" (UID: "01fc6775-f774-41c4-872e-dba5e6d80e10"). InnerVolumeSpecName "kube-api-access-b6749". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.807801 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "01fc6775-f774-41c4-872e-dba5e6d80e10" (UID: "01fc6775-f774-41c4-872e-dba5e6d80e10"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.807837 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "01fc6775-f774-41c4-872e-dba5e6d80e10" (UID: "01fc6775-f774-41c4-872e-dba5e6d80e10"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.808090 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "01fc6775-f774-41c4-872e-dba5e6d80e10" (UID: "01fc6775-f774-41c4-872e-dba5e6d80e10"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.809701 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "01fc6775-f774-41c4-872e-dba5e6d80e10" (UID: "01fc6775-f774-41c4-872e-dba5e6d80e10"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.811207 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "01fc6775-f774-41c4-872e-dba5e6d80e10" (UID: "01fc6775-f774-41c4-872e-dba5e6d80e10"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.811531 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "01fc6775-f774-41c4-872e-dba5e6d80e10" (UID: "01fc6775-f774-41c4-872e-dba5e6d80e10"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.814297 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "01fc6775-f774-41c4-872e-dba5e6d80e10" (UID: "01fc6775-f774-41c4-872e-dba5e6d80e10"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.884642 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885393 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885418 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-audit-dir\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885443 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885466 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885501 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-service-ca\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885516 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885535 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-session\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885555 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885578 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-user-template-error\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885609 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-audit-policies\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885647 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-router-certs\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885677 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-user-template-login\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885725 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlz8j\" (UniqueName: \"kubernetes.io/projected/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-kube-api-access-jlz8j\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885776 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b6749\" (UniqueName: \"kubernetes.io/projected/01fc6775-f774-41c4-872e-dba5e6d80e10-kube-api-access-b6749\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885788 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885798 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885811 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885822 4877 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-audit-policies\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885832 4877 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/01fc6775-f774-41c4-872e-dba5e6d80e10-audit-dir\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885879 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885889 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885898 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885909 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885920 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885935 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.885945 4877 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/01fc6775-f774-41c4-872e-dba5e6d80e10-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.886494 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.886529 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-service-ca\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.886576 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-audit-dir\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.886792 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-audit-policies\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.887003 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.892172 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-router-certs\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.892229 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.893214 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.893513 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.893523 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-user-template-error\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.893867 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-system-session\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.894001 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.895313 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-v4-0-config-user-template-login\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:40 crc kubenswrapper[4877]: I0128 16:38:40.905179 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlz8j\" (UniqueName: \"kubernetes.io/projected/eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115-kube-api-access-jlz8j\") pod \"oauth-openshift-7448d7568b-r4ttp\" (UID: \"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115\") " pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:41 crc kubenswrapper[4877]: I0128 16:38:41.009962 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:41 crc kubenswrapper[4877]: I0128 16:38:41.136199 4877 generic.go:334] "Generic (PLEG): container finished" podID="01fc6775-f774-41c4-872e-dba5e6d80e10" containerID="fdcaaf42922292a2614c4426936654a6ff76d10141f1260c301837ef9c3f37d2" exitCode=0
Jan 28 16:38:41 crc kubenswrapper[4877]: I0128 16:38:41.136281 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-whfj4" event={"ID":"01fc6775-f774-41c4-872e-dba5e6d80e10","Type":"ContainerDied","Data":"fdcaaf42922292a2614c4426936654a6ff76d10141f1260c301837ef9c3f37d2"}
Jan 28 16:38:41 crc kubenswrapper[4877]: I0128 16:38:41.136291 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-whfj4"
Jan 28 16:38:41 crc kubenswrapper[4877]: I0128 16:38:41.136345 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-whfj4" event={"ID":"01fc6775-f774-41c4-872e-dba5e6d80e10","Type":"ContainerDied","Data":"2f7b2feb8a3607cad91f718ba88af53d7f50dec920a138c70a18fbe5264f51f1"}
Jan 28 16:38:41 crc kubenswrapper[4877]: I0128 16:38:41.136371 4877 scope.go:117] "RemoveContainer" containerID="fdcaaf42922292a2614c4426936654a6ff76d10141f1260c301837ef9c3f37d2"
Jan 28 16:38:41 crc kubenswrapper[4877]: I0128 16:38:41.138282 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-q5lsk" podUID="db5392ea-c535-46b8-80cf-7fc0b43bf1de" containerName="registry-server" containerID="cri-o://b2f4e60aa3a730bcfc985dbbf261241c1ad5887fe251a84d2ca3937582bfbcd1" gracePeriod=2
Jan 28 16:38:41 crc kubenswrapper[4877]: I0128 16:38:41.188553 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-whfj4"]
Jan 28 16:38:41 crc kubenswrapper[4877]: I0128 16:38:41.210746 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-whfj4"]
Jan 28 16:38:41 crc kubenswrapper[4877]: I0128 16:38:41.239046 4877 scope.go:117] "RemoveContainer" containerID="fdcaaf42922292a2614c4426936654a6ff76d10141f1260c301837ef9c3f37d2"
Jan 28 16:38:41 crc kubenswrapper[4877]: E0128 16:38:41.241030 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdcaaf42922292a2614c4426936654a6ff76d10141f1260c301837ef9c3f37d2\": container with ID starting with fdcaaf42922292a2614c4426936654a6ff76d10141f1260c301837ef9c3f37d2 not found: ID does not exist" containerID="fdcaaf42922292a2614c4426936654a6ff76d10141f1260c301837ef9c3f37d2"
Jan 28 16:38:41 crc kubenswrapper[4877]: I0128 16:38:41.241064 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdcaaf42922292a2614c4426936654a6ff76d10141f1260c301837ef9c3f37d2"} err="failed to get container status \"fdcaaf42922292a2614c4426936654a6ff76d10141f1260c301837ef9c3f37d2\": rpc error: code = NotFound desc = could not find container \"fdcaaf42922292a2614c4426936654a6ff76d10141f1260c301837ef9c3f37d2\": container with ID starting with fdcaaf42922292a2614c4426936654a6ff76d10141f1260c301837ef9c3f37d2 not found: ID does not exist"
Jan 28 16:38:41 crc kubenswrapper[4877]: I0128 16:38:41.292146 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7448d7568b-r4ttp"]
Jan 28 16:38:41 crc kubenswrapper[4877]: I0128 16:38:41.338160 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01fc6775-f774-41c4-872e-dba5e6d80e10" path="/var/lib/kubelet/pods/01fc6775-f774-41c4-872e-dba5e6d80e10/volumes"
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.164329 4877 generic.go:334] "Generic (PLEG): container finished" podID="db5392ea-c535-46b8-80cf-7fc0b43bf1de" containerID="b2f4e60aa3a730bcfc985dbbf261241c1ad5887fe251a84d2ca3937582bfbcd1" exitCode=0
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.164530 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5lsk" event={"ID":"db5392ea-c535-46b8-80cf-7fc0b43bf1de","Type":"ContainerDied","Data":"b2f4e60aa3a730bcfc985dbbf261241c1ad5887fe251a84d2ca3937582bfbcd1"}
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.168738 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" event={"ID":"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115","Type":"ContainerStarted","Data":"e88cb39ae9962b2aba7a4c2ada5926c141a192cb663c7b6bc4843f8b15a4f02e"}
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.168774 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" event={"ID":"eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115","Type":"ContainerStarted","Data":"333a4ef8b7f81b452d7d08f0e9e0d998adb9f8c59ee2304d6d06b7bb5687e053"}
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.170266 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.199604 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" podStartSLOduration=27.199578939 podStartE2EDuration="27.199578939s" podCreationTimestamp="2026-01-28 16:38:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:38:42.191519016 +0000 UTC m=+225.749845904" watchObservedRunningTime="2026-01-28 16:38:42.199578939 +0000 UTC m=+225.757905827"
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.281677 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q5lsk"
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.302203 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp"
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.411076 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79wvd\" (UniqueName: \"kubernetes.io/projected/db5392ea-c535-46b8-80cf-7fc0b43bf1de-kube-api-access-79wvd\") pod \"db5392ea-c535-46b8-80cf-7fc0b43bf1de\" (UID: \"db5392ea-c535-46b8-80cf-7fc0b43bf1de\") "
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.411294 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db5392ea-c535-46b8-80cf-7fc0b43bf1de-utilities\") pod \"db5392ea-c535-46b8-80cf-7fc0b43bf1de\" (UID: \"db5392ea-c535-46b8-80cf-7fc0b43bf1de\") "
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.411328 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db5392ea-c535-46b8-80cf-7fc0b43bf1de-catalog-content\") pod \"db5392ea-c535-46b8-80cf-7fc0b43bf1de\" (UID: \"db5392ea-c535-46b8-80cf-7fc0b43bf1de\") "
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.412925 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db5392ea-c535-46b8-80cf-7fc0b43bf1de-utilities" (OuterVolumeSpecName: "utilities") pod "db5392ea-c535-46b8-80cf-7fc0b43bf1de" (UID: "db5392ea-c535-46b8-80cf-7fc0b43bf1de"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.433812 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db5392ea-c535-46b8-80cf-7fc0b43bf1de-kube-api-access-79wvd" (OuterVolumeSpecName: "kube-api-access-79wvd") pod "db5392ea-c535-46b8-80cf-7fc0b43bf1de" (UID: "db5392ea-c535-46b8-80cf-7fc0b43bf1de"). InnerVolumeSpecName "kube-api-access-79wvd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.491130 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db5392ea-c535-46b8-80cf-7fc0b43bf1de-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "db5392ea-c535-46b8-80cf-7fc0b43bf1de" (UID: "db5392ea-c535-46b8-80cf-7fc0b43bf1de"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.513196 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db5392ea-c535-46b8-80cf-7fc0b43bf1de-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.513253 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db5392ea-c535-46b8-80cf-7fc0b43bf1de-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:42 crc kubenswrapper[4877]: I0128 16:38:42.513312 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79wvd\" (UniqueName: \"kubernetes.io/projected/db5392ea-c535-46b8-80cf-7fc0b43bf1de-kube-api-access-79wvd\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:43 crc kubenswrapper[4877]: I0128 16:38:43.179746 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q5lsk"
Jan 28 16:38:43 crc kubenswrapper[4877]: I0128 16:38:43.179706 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5lsk" event={"ID":"db5392ea-c535-46b8-80cf-7fc0b43bf1de","Type":"ContainerDied","Data":"e5067ed33fba89eb609844e65fb6431d0d88d65abba7e21a29190ed4875d7a53"}
Jan 28 16:38:43 crc kubenswrapper[4877]: I0128 16:38:43.179875 4877 scope.go:117] "RemoveContainer" containerID="b2f4e60aa3a730bcfc985dbbf261241c1ad5887fe251a84d2ca3937582bfbcd1"
Jan 28 16:38:43 crc kubenswrapper[4877]: I0128 16:38:43.201962 4877 scope.go:117] "RemoveContainer" containerID="c170df2693cb5aa452c76e4bc7e468dcd49375ee68f4810e86db025cccb49bcd"
Jan 28 16:38:43 crc kubenswrapper[4877]: I0128 16:38:43.223693 4877 scope.go:117] "RemoveContainer" containerID="96b9c13db08a7b85231276ce730f5127e88f0afbf56d92336c60d16a4ac02fe1"
Jan 28 16:38:43 crc kubenswrapper[4877]: I0128 16:38:43.229142 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q5lsk"]
Jan 28 16:38:43 crc kubenswrapper[4877]: I0128 16:38:43.234698 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-q5lsk"]
Jan 28 16:38:43 crc kubenswrapper[4877]: I0128 16:38:43.345978 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db5392ea-c535-46b8-80cf-7fc0b43bf1de" path="/var/lib/kubelet/pods/db5392ea-c535-46b8-80cf-7fc0b43bf1de/volumes"
Jan 28 16:38:44 crc kubenswrapper[4877]: I0128 16:38:44.395820 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fmnbl"]
Jan 28 16:38:44 crc kubenswrapper[4877]: I0128 16:38:44.396254 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fmnbl" podUID="7fe92108-0e44-423d-b939-5ee8aec6c82f" containerName="registry-server" containerID="cri-o://a1c3364a5e1bbce62bacbbbf8815d8d59bd343e844abf411a3169c77b823ce5b" gracePeriod=2
Jan 28 16:38:44 crc kubenswrapper[4877]: I0128 16:38:44.816112 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fmnbl"
Jan 28 16:38:44 crc kubenswrapper[4877]: I0128 16:38:44.847548 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgcpj\" (UniqueName: \"kubernetes.io/projected/7fe92108-0e44-423d-b939-5ee8aec6c82f-kube-api-access-fgcpj\") pod \"7fe92108-0e44-423d-b939-5ee8aec6c82f\" (UID: \"7fe92108-0e44-423d-b939-5ee8aec6c82f\") "
Jan 28 16:38:44 crc kubenswrapper[4877]: I0128 16:38:44.847612 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fe92108-0e44-423d-b939-5ee8aec6c82f-catalog-content\") pod \"7fe92108-0e44-423d-b939-5ee8aec6c82f\" (UID: \"7fe92108-0e44-423d-b939-5ee8aec6c82f\") "
Jan 28 16:38:44 crc kubenswrapper[4877]: I0128 16:38:44.847947 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fe92108-0e44-423d-b939-5ee8aec6c82f-utilities\") pod \"7fe92108-0e44-423d-b939-5ee8aec6c82f\" (UID: \"7fe92108-0e44-423d-b939-5ee8aec6c82f\") "
Jan 28 16:38:44 crc kubenswrapper[4877]: I0128 16:38:44.849588 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fe92108-0e44-423d-b939-5ee8aec6c82f-utilities" (OuterVolumeSpecName: "utilities") pod "7fe92108-0e44-423d-b939-5ee8aec6c82f" (UID: "7fe92108-0e44-423d-b939-5ee8aec6c82f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:38:44 crc kubenswrapper[4877]: I0128 16:38:44.855753 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fe92108-0e44-423d-b939-5ee8aec6c82f-kube-api-access-fgcpj" (OuterVolumeSpecName: "kube-api-access-fgcpj") pod "7fe92108-0e44-423d-b939-5ee8aec6c82f" (UID: "7fe92108-0e44-423d-b939-5ee8aec6c82f"). InnerVolumeSpecName "kube-api-access-fgcpj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:38:44 crc kubenswrapper[4877]: I0128 16:38:44.950024 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fe92108-0e44-423d-b939-5ee8aec6c82f-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:44 crc kubenswrapper[4877]: I0128 16:38:44.950066 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgcpj\" (UniqueName: \"kubernetes.io/projected/7fe92108-0e44-423d-b939-5ee8aec6c82f-kube-api-access-fgcpj\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:44 crc kubenswrapper[4877]: I0128 16:38:44.974940 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fe92108-0e44-423d-b939-5ee8aec6c82f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7fe92108-0e44-423d-b939-5ee8aec6c82f" (UID: "7fe92108-0e44-423d-b939-5ee8aec6c82f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.052261 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fe92108-0e44-423d-b939-5ee8aec6c82f-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.205595 4877 generic.go:334] "Generic (PLEG): container finished" podID="7fe92108-0e44-423d-b939-5ee8aec6c82f" containerID="a1c3364a5e1bbce62bacbbbf8815d8d59bd343e844abf411a3169c77b823ce5b" exitCode=0
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.205659 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fmnbl" event={"ID":"7fe92108-0e44-423d-b939-5ee8aec6c82f","Type":"ContainerDied","Data":"a1c3364a5e1bbce62bacbbbf8815d8d59bd343e844abf411a3169c77b823ce5b"}
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.205682 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fmnbl"
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.205691 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fmnbl" event={"ID":"7fe92108-0e44-423d-b939-5ee8aec6c82f","Type":"ContainerDied","Data":"b04b600372cd3d7baf626d5dad5b7b1ef52f2ef1ba10ffe0502db5cc07cfea6f"}
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.205704 4877 scope.go:117] "RemoveContainer" containerID="a1c3364a5e1bbce62bacbbbf8815d8d59bd343e844abf411a3169c77b823ce5b"
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.224200 4877 scope.go:117] "RemoveContainer" containerID="9d8fcae14d114abc6e3f949b7e8f030b9128e3f0df1f886a50fe27e73a9ac6d4"
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.254188 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fmnbl"]
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.254412 4877 scope.go:117] "RemoveContainer" containerID="ee9b6c0b0a5a15d0385df9d67ae97f345cd351efb0435ba6f9a8f254e07aadc7"
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.260631 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fmnbl"]
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.272759 4877 scope.go:117] "RemoveContainer" containerID="a1c3364a5e1bbce62bacbbbf8815d8d59bd343e844abf411a3169c77b823ce5b"
Jan 28 16:38:45 crc kubenswrapper[4877]: E0128 16:38:45.273542 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1c3364a5e1bbce62bacbbbf8815d8d59bd343e844abf411a3169c77b823ce5b\": container with ID starting with a1c3364a5e1bbce62bacbbbf8815d8d59bd343e844abf411a3169c77b823ce5b not found: ID does not exist" containerID="a1c3364a5e1bbce62bacbbbf8815d8d59bd343e844abf411a3169c77b823ce5b"
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.273595 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1c3364a5e1bbce62bacbbbf8815d8d59bd343e844abf411a3169c77b823ce5b"} err="failed to get container status \"a1c3364a5e1bbce62bacbbbf8815d8d59bd343e844abf411a3169c77b823ce5b\": rpc error: code = NotFound desc = could not find container \"a1c3364a5e1bbce62bacbbbf8815d8d59bd343e844abf411a3169c77b823ce5b\": container with ID starting with a1c3364a5e1bbce62bacbbbf8815d8d59bd343e844abf411a3169c77b823ce5b not found: ID does not exist"
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.273627 4877 scope.go:117] "RemoveContainer" containerID="9d8fcae14d114abc6e3f949b7e8f030b9128e3f0df1f886a50fe27e73a9ac6d4"
Jan 28 16:38:45 crc kubenswrapper[4877]: E0128 16:38:45.274047 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d8fcae14d114abc6e3f949b7e8f030b9128e3f0df1f886a50fe27e73a9ac6d4\": container with ID starting with 9d8fcae14d114abc6e3f949b7e8f030b9128e3f0df1f886a50fe27e73a9ac6d4 not found: ID does not exist" containerID="9d8fcae14d114abc6e3f949b7e8f030b9128e3f0df1f886a50fe27e73a9ac6d4"
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.274107 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d8fcae14d114abc6e3f949b7e8f030b9128e3f0df1f886a50fe27e73a9ac6d4"} err="failed to get container status \"9d8fcae14d114abc6e3f949b7e8f030b9128e3f0df1f886a50fe27e73a9ac6d4\": rpc error: code = NotFound desc = could not find container \"9d8fcae14d114abc6e3f949b7e8f030b9128e3f0df1f886a50fe27e73a9ac6d4\": container with ID starting with 9d8fcae14d114abc6e3f949b7e8f030b9128e3f0df1f886a50fe27e73a9ac6d4 not found: ID does not exist"
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.274152 4877 scope.go:117] "RemoveContainer" containerID="ee9b6c0b0a5a15d0385df9d67ae97f345cd351efb0435ba6f9a8f254e07aadc7"
Jan 28 16:38:45 crc kubenswrapper[4877]: E0128 16:38:45.274724 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee9b6c0b0a5a15d0385df9d67ae97f345cd351efb0435ba6f9a8f254e07aadc7\": container with ID starting with ee9b6c0b0a5a15d0385df9d67ae97f345cd351efb0435ba6f9a8f254e07aadc7 not found: ID does not exist" containerID="ee9b6c0b0a5a15d0385df9d67ae97f345cd351efb0435ba6f9a8f254e07aadc7"
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.274753 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee9b6c0b0a5a15d0385df9d67ae97f345cd351efb0435ba6f9a8f254e07aadc7"} err="failed to get container status \"ee9b6c0b0a5a15d0385df9d67ae97f345cd351efb0435ba6f9a8f254e07aadc7\": rpc error: code = NotFound desc = could not find container \"ee9b6c0b0a5a15d0385df9d67ae97f345cd351efb0435ba6f9a8f254e07aadc7\": container with ID starting with ee9b6c0b0a5a15d0385df9d67ae97f345cd351efb0435ba6f9a8f254e07aadc7 not found: ID does not exist"
Jan 28 16:38:45 crc kubenswrapper[4877]: I0128 16:38:45.338231 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fe92108-0e44-423d-b939-5ee8aec6c82f" path="/var/lib/kubelet/pods/7fe92108-0e44-423d-b939-5ee8aec6c82f/volumes"
Jan 28 16:38:47 crc kubenswrapper[4877]: I0128 16:38:47.019635 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2m2k4"
Jan 28 16:38:47 crc kubenswrapper[4877]: I0128 16:38:47.020009 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2m2k4"
Jan 28 16:38:47 crc kubenswrapper[4877]: I0128 16:38:47.063261 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2m2k4"
Jan 28 16:38:47 crc kubenswrapper[4877]: I0128 16:38:47.293502 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2m2k4"
Jan 28 16:38:48 crc kubenswrapper[4877]: I0128 16:38:48.267322 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-d2vhl"
Jan 28 16:38:48 crc kubenswrapper[4877]: I0128 16:38:48.267861 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-d2vhl"
Jan 28 16:38:48 crc kubenswrapper[4877]: I0128 16:38:48.328333 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-d2vhl"
Jan 28 16:38:48 crc kubenswrapper[4877]: I0128 16:38:48.803390 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hx6kq"
Jan 28 16:38:48 crc kubenswrapper[4877]: I0128 16:38:48.803509 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hx6kq"
Jan 28 16:38:48 crc kubenswrapper[4877]: I0128 16:38:48.867916 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hx6kq"
Jan 28 16:38:49 crc kubenswrapper[4877]: I0128 16:38:49.285598 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-d2vhl"
Jan 28 16:38:49 crc kubenswrapper[4877]: I0128 16:38:49.296211 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hx6kq"
Jan 28 16:38:49 crc kubenswrapper[4877]: I0128 16:38:49.564365 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vxfw7"
Jan 28 16:38:49 crc kubenswrapper[4877]: I0128 16:38:49.564413 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vxfw7"
Jan 28 16:38:49 crc kubenswrapper[4877]: I0128 16:38:49.621557 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vxfw7"
Jan 28 16:38:49 crc kubenswrapper[4877]: I0128 16:38:49.793309 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2m2k4"]
Jan 28 16:38:49 crc kubenswrapper[4877]: I0128 16:38:49.793620 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2m2k4" podUID="f76979dc-a93e-496f-b9ac-e3f0710c2899" containerName="registry-server" containerID="cri-o://f9d869b40b5b083671d1b92117a47f84e4495833722f704197246d0379afb876" gracePeriod=2
Jan 28 16:38:50 crc kubenswrapper[4877]: I0128 16:38:50.292260 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vxfw7"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.027303 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2m2k4"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.041488 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqc5k\" (UniqueName: \"kubernetes.io/projected/f76979dc-a93e-496f-b9ac-e3f0710c2899-kube-api-access-jqc5k\") pod \"f76979dc-a93e-496f-b9ac-e3f0710c2899\" (UID: \"f76979dc-a93e-496f-b9ac-e3f0710c2899\") "
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.041595 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f76979dc-a93e-496f-b9ac-e3f0710c2899-utilities\") pod \"f76979dc-a93e-496f-b9ac-e3f0710c2899\" (UID: \"f76979dc-a93e-496f-b9ac-e3f0710c2899\") "
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.041628 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f76979dc-a93e-496f-b9ac-e3f0710c2899-catalog-content\") pod \"f76979dc-a93e-496f-b9ac-e3f0710c2899\" (UID: \"f76979dc-a93e-496f-b9ac-e3f0710c2899\") "
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.042411 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f76979dc-a93e-496f-b9ac-e3f0710c2899-utilities" (OuterVolumeSpecName: "utilities") pod "f76979dc-a93e-496f-b9ac-e3f0710c2899" (UID: "f76979dc-a93e-496f-b9ac-e3f0710c2899"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.047604 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f76979dc-a93e-496f-b9ac-e3f0710c2899-kube-api-access-jqc5k" (OuterVolumeSpecName: "kube-api-access-jqc5k") pod "f76979dc-a93e-496f-b9ac-e3f0710c2899" (UID: "f76979dc-a93e-496f-b9ac-e3f0710c2899"). InnerVolumeSpecName "kube-api-access-jqc5k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.100843 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f76979dc-a93e-496f-b9ac-e3f0710c2899-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f76979dc-a93e-496f-b9ac-e3f0710c2899" (UID: "f76979dc-a93e-496f-b9ac-e3f0710c2899"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.143817 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f76979dc-a93e-496f-b9ac-e3f0710c2899-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.143859 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f76979dc-a93e-496f-b9ac-e3f0710c2899-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.143872 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqc5k\" (UniqueName: \"kubernetes.io/projected/f76979dc-a93e-496f-b9ac-e3f0710c2899-kube-api-access-jqc5k\") on node \"crc\" DevicePath \"\""
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.248986 4877 generic.go:334] "Generic (PLEG): container finished" podID="f76979dc-a93e-496f-b9ac-e3f0710c2899" containerID="f9d869b40b5b083671d1b92117a47f84e4495833722f704197246d0379afb876" exitCode=0
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.249057 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2m2k4"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.249097 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2m2k4" event={"ID":"f76979dc-a93e-496f-b9ac-e3f0710c2899","Type":"ContainerDied","Data":"f9d869b40b5b083671d1b92117a47f84e4495833722f704197246d0379afb876"}
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.249167 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2m2k4" event={"ID":"f76979dc-a93e-496f-b9ac-e3f0710c2899","Type":"ContainerDied","Data":"a3c484671da2999b4e9e579b600bd5f3904e3e7d93200aac3f3063a24c231f4a"}
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.249196 4877 scope.go:117] "RemoveContainer" containerID="f9d869b40b5b083671d1b92117a47f84e4495833722f704197246d0379afb876"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.265580 4877 scope.go:117] "RemoveContainer" containerID="faa6e19ac7ee9006302eb4804be0c56a62713a8bc42ca3b952298f903dd29f3c"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.281211 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2m2k4"]
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.286740 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2m2k4"]
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.312821 4877 scope.go:117] "RemoveContainer" containerID="4ecbb57cfc6762eb2718d3abc1ad3dbde27716281882acf872a2bc51569d1b46"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.333888 4877 scope.go:117] "RemoveContainer" containerID="f9d869b40b5b083671d1b92117a47f84e4495833722f704197246d0379afb876"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.334449 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9d869b40b5b083671d1b92117a47f84e4495833722f704197246d0379afb876\": container with ID starting with f9d869b40b5b083671d1b92117a47f84e4495833722f704197246d0379afb876 not found: ID does not exist" containerID="f9d869b40b5b083671d1b92117a47f84e4495833722f704197246d0379afb876"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.334550 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9d869b40b5b083671d1b92117a47f84e4495833722f704197246d0379afb876"} err="failed to get container status \"f9d869b40b5b083671d1b92117a47f84e4495833722f704197246d0379afb876\": rpc error: code = NotFound desc = could not find container \"f9d869b40b5b083671d1b92117a47f84e4495833722f704197246d0379afb876\": container with ID starting with f9d869b40b5b083671d1b92117a47f84e4495833722f704197246d0379afb876 not found: ID does not exist"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.334597 4877 scope.go:117] "RemoveContainer" containerID="faa6e19ac7ee9006302eb4804be0c56a62713a8bc42ca3b952298f903dd29f3c"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.335126 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"faa6e19ac7ee9006302eb4804be0c56a62713a8bc42ca3b952298f903dd29f3c\": container with ID starting with faa6e19ac7ee9006302eb4804be0c56a62713a8bc42ca3b952298f903dd29f3c not found: ID does not exist" containerID="faa6e19ac7ee9006302eb4804be0c56a62713a8bc42ca3b952298f903dd29f3c"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.335157 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"faa6e19ac7ee9006302eb4804be0c56a62713a8bc42ca3b952298f903dd29f3c"} err="failed to get container status \"faa6e19ac7ee9006302eb4804be0c56a62713a8bc42ca3b952298f903dd29f3c\": rpc error: code = NotFound desc = could not find container \"faa6e19ac7ee9006302eb4804be0c56a62713a8bc42ca3b952298f903dd29f3c\": container with ID starting with faa6e19ac7ee9006302eb4804be0c56a62713a8bc42ca3b952298f903dd29f3c not found: ID does not exist"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.335177 4877 scope.go:117] "RemoveContainer" containerID="4ecbb57cfc6762eb2718d3abc1ad3dbde27716281882acf872a2bc51569d1b46"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.335505 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ecbb57cfc6762eb2718d3abc1ad3dbde27716281882acf872a2bc51569d1b46\": container with ID starting with 4ecbb57cfc6762eb2718d3abc1ad3dbde27716281882acf872a2bc51569d1b46 not found: ID does not exist" containerID="4ecbb57cfc6762eb2718d3abc1ad3dbde27716281882acf872a2bc51569d1b46"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.335574 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ecbb57cfc6762eb2718d3abc1ad3dbde27716281882acf872a2bc51569d1b46"} err="failed to get container status \"4ecbb57cfc6762eb2718d3abc1ad3dbde27716281882acf872a2bc51569d1b46\": rpc error: code = NotFound desc = could not find container \"4ecbb57cfc6762eb2718d3abc1ad3dbde27716281882acf872a2bc51569d1b46\": container with ID starting with 4ecbb57cfc6762eb2718d3abc1ad3dbde27716281882acf872a2bc51569d1b46 not found: ID does not exist"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.352163 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f76979dc-a93e-496f-b9ac-e3f0710c2899" path="/var/lib/kubelet/pods/f76979dc-a93e-496f-b9ac-e3f0710c2899/volumes"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.984497 4877 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.984808 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db5392ea-c535-46b8-80cf-7fc0b43bf1de" containerName="extract-utilities"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.984825 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="db5392ea-c535-46b8-80cf-7fc0b43bf1de" containerName="extract-utilities"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.984837 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fe92108-0e44-423d-b939-5ee8aec6c82f" containerName="extract-content"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.984847 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fe92108-0e44-423d-b939-5ee8aec6c82f" containerName="extract-content"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.984864 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fe92108-0e44-423d-b939-5ee8aec6c82f" containerName="extract-utilities"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.984871 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fe92108-0e44-423d-b939-5ee8aec6c82f" containerName="extract-utilities"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.984882 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f76979dc-a93e-496f-b9ac-e3f0710c2899" containerName="extract-utilities"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.984891 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f76979dc-a93e-496f-b9ac-e3f0710c2899" containerName="extract-utilities"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.984906 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f76979dc-a93e-496f-b9ac-e3f0710c2899" containerName="registry-server"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.984914 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f76979dc-a93e-496f-b9ac-e3f0710c2899" containerName="registry-server"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.984924 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db5392ea-c535-46b8-80cf-7fc0b43bf1de" containerName="registry-server"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.984931 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="db5392ea-c535-46b8-80cf-7fc0b43bf1de" containerName="registry-server"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.984939 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fe92108-0e44-423d-b939-5ee8aec6c82f" containerName="registry-server"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.984946 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fe92108-0e44-423d-b939-5ee8aec6c82f" containerName="registry-server"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.984957 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f76979dc-a93e-496f-b9ac-e3f0710c2899" containerName="extract-content"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.984967 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f76979dc-a93e-496f-b9ac-e3f0710c2899" containerName="extract-content"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.984979 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db5392ea-c535-46b8-80cf-7fc0b43bf1de" containerName="extract-content"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.984986 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="db5392ea-c535-46b8-80cf-7fc0b43bf1de" containerName="extract-content"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.985131 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fe92108-0e44-423d-b939-5ee8aec6c82f" containerName="registry-server"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.985144 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="db5392ea-c535-46b8-80cf-7fc0b43bf1de" containerName="registry-server"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.985163 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f76979dc-a93e-496f-b9ac-e3f0710c2899" containerName="registry-server"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.985562 4877 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.985859 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390" gracePeriod=15
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.985920 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.985973 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae" gracePeriod=15
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.986033 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3" gracePeriod=15
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.985966 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2" gracePeriod=15
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.986009 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd" gracePeriod=15
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.988307 4877 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.988530 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.988547 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.988564 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.988572 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.988583 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.988590 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.988601 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.988608 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.988623 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.988631 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.988647 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.988655 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 28 16:38:51 crc kubenswrapper[4877]: E0128 16:38:51.988671 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.988679 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.988802 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.988813 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.988828 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.988837 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.988847 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Jan 28 16:38:51 crc kubenswrapper[4877]: I0128 16:38:51.988862 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.024098 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.063582 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.063660 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.063727 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.063754 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.063789 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.063822 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.064069 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.064165 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.165390 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.165456 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.165496 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.165540 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.165559 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.165586 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.165616 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.165593 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.165647 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.165652 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.165648 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.165678 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.165670 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.165686 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.165738 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.165595 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.258253 4877 generic.go:334] "Generic (PLEG): container finished" podID="3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8" containerID="77bf4f3b61720e0cd0aa6a280bb82c95062a79f56e8ce48c8dbd85e60364a814" exitCode=0
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.258339 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8","Type":"ContainerDied","Data":"77bf4f3b61720e0cd0aa6a280bb82c95062a79f56e8ce48c8dbd85e60364a814"}
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.259373 4877 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.34:6443: connect: connection refused"
Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 
16:38:52.259905 4877 status_manager.go:851] "Failed to get status for pod" podUID="3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.260357 4877 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.263339 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.265056 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.265992 4877 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3" exitCode=0 Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.266054 4877 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae" exitCode=0 Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.266065 4877 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2" exitCode=0 Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.266078 4877 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd" exitCode=2 Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.266104 4877 scope.go:117] "RemoveContainer" containerID="1beae85cd0df564517ae94a18d91b29d4a4b23d144ba61978ec2307f60a112c1" Jan 28 16:38:52 crc kubenswrapper[4877]: I0128 16:38:52.320808 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 16:38:52 crc kubenswrapper[4877]: E0128 16:38:52.344645 4877 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.129.56.34:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188ef27f576553c4 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 16:38:52.344210372 +0000 UTC m=+235.902537290,LastTimestamp:2026-01-28 16:38:52.344210372 +0000 UTC m=+235.902537290,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.278299 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.284740 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"02d5c419b53a198d482d72da16c156375af65c60bd3795cd3813cdeb36b4ef5e"} Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.284921 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"713b526409312a89d88fcbb7515f918505a0f9e4fd658d414adcd96a43f7c2c0"} Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.285182 4877 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.286011 4877 status_manager.go:851] "Failed to get status for pod" podUID="3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.287434 4877 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.527590 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.528588 4877 status_manager.go:851] "Failed to get status for pod" podUID="3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.528772 4877 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.585309 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-kubelet-dir\") pod \"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8\" (UID: \"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8\") " Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.585371 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-var-lock\") pod \"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8\" (UID: \"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8\") " Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.585522 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8" (UID: "3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.585625 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-kube-api-access\") pod \"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8\" (UID: \"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8\") " Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.585733 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-var-lock" (OuterVolumeSpecName: "var-lock") pod "3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8" (UID: "3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.585818 4877 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.591751 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8" (UID: "3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.687061 4877 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-var-lock\") on node \"crc\" DevicePath \"\"" Jan 28 16:38:53 crc kubenswrapper[4877]: I0128 16:38:53.687100 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.299995 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8","Type":"ContainerDied","Data":"ec2e5aee7dd31eea87085cefdba6781544120c7d23bdeb901a6640bd694cf655"} Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.300336 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.300363 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec2e5aee7dd31eea87085cefdba6781544120c7d23bdeb901a6640bd694cf655" Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.348158 4877 status_manager.go:851] "Failed to get status for pod" podUID="3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.348793 4877 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.355327 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.356432 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.357374 4877 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.357567 4877 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.357731 4877 status_manager.go:851] "Failed to get status for pod" podUID="3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:54 crc kubenswrapper[4877]: E0128 16:38:54.395623 4877 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.129.56.34:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" volumeName="registry-storage" Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.498626 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.498710 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.498890 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.499109 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.499159 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). 
InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.499178 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.499380 4877 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.499415 4877 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 28 16:38:54 crc kubenswrapper[4877]: I0128 16:38:54.499433 4877 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.317069 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.319492 4877 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390" exitCode=0 Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.319673 4877 scope.go:117] "RemoveContainer" containerID="1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.320056 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.342839 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.343538 4877 status_manager.go:851] "Failed to get status for pod" podUID="3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.343872 4877 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.344306 4877 scope.go:117] "RemoveContainer" containerID="cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.362992 4877 scope.go:117] "RemoveContainer" containerID="03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.379540 4877 scope.go:117] "RemoveContainer" containerID="d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.403120 4877 scope.go:117] "RemoveContainer" containerID="8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.427741 4877 scope.go:117] "RemoveContainer" containerID="4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.448680 4877 scope.go:117] "RemoveContainer" containerID="1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3" Jan 28 16:38:55 crc kubenswrapper[4877]: E0128 16:38:55.449162 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\": container with ID starting with 1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3 not found: ID does not exist" containerID="1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.449193 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3"} err="failed to get container status \"1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\": rpc error: code = NotFound desc = could not find container \"1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3\": container with ID starting with 1cf07487333e527690e4edb92630fe3efbb1bd15232ea8bb3dd97912c75179b3 not found: ID does not exist" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.449218 4877 scope.go:117] "RemoveContainer" containerID="cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae" Jan 28 16:38:55 crc kubenswrapper[4877]: E0128 16:38:55.449466 4877 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\": container with ID starting with cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae not found: ID does not exist" containerID="cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.449501 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae"} err="failed to get container status \"cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\": rpc error: code = NotFound desc = could not find container \"cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae\": container with ID starting with cc2ee04f5af93d68f9987be63c8ba5eb16475867e7bb665f521b0bfe360caaae not found: ID does not exist" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.449513 4877 scope.go:117] "RemoveContainer" containerID="03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2" Jan 28 16:38:55 crc kubenswrapper[4877]: E0128 16:38:55.450255 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\": container with ID starting with 03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2 not found: ID does not exist" containerID="03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.450281 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2"} err="failed to get container status \"03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\": rpc error: code = NotFound desc = could not find container \"03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2\": container with ID starting with 03d242be6b1dd44aa50d7debfccad892002473135531d3ffd5a2e88860bea6b2 not found: ID does not exist" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.450297 4877 scope.go:117] "RemoveContainer" containerID="d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd" Jan 28 16:38:55 crc kubenswrapper[4877]: E0128 16:38:55.451264 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\": container with ID starting with d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd not found: ID does not exist" containerID="d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.451313 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd"} err="failed to get container status \"d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\": rpc error: code = NotFound desc = could not find container \"d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd\": container with ID starting with d5029ecd4eb609ac9bd2b773e4f330ab2495755345e716b60e8bcf4df35252fd not found: ID does not exist" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.451329 4877 scope.go:117] "RemoveContainer" 
containerID="8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390" Jan 28 16:38:55 crc kubenswrapper[4877]: E0128 16:38:55.452003 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\": container with ID starting with 8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390 not found: ID does not exist" containerID="8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.452067 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390"} err="failed to get container status \"8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\": rpc error: code = NotFound desc = could not find container \"8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390\": container with ID starting with 8a1ed7f5cb08bb162a5d9a9ade1c2807ef0d69b17a2816a18a72f77d40748390 not found: ID does not exist" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.452099 4877 scope.go:117] "RemoveContainer" containerID="4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4" Jan 28 16:38:55 crc kubenswrapper[4877]: E0128 16:38:55.452377 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\": container with ID starting with 4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4 not found: ID does not exist" containerID="4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4" Jan 28 16:38:55 crc kubenswrapper[4877]: I0128 16:38:55.452435 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4"} err="failed to get container status \"4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\": rpc error: code = NotFound desc = could not find container \"4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4\": container with ID starting with 4ccc8e512108eeb7a3596b6d6e32e199233384f0040e3be266c73c042c736bd4 not found: ID does not exist" Jan 28 16:38:57 crc kubenswrapper[4877]: E0128 16:38:57.268568 4877 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:57 crc kubenswrapper[4877]: E0128 16:38:57.270004 4877 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:57 crc kubenswrapper[4877]: E0128 16:38:57.270372 4877 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:57 crc kubenswrapper[4877]: E0128 16:38:57.270884 4877 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 
28 16:38:57 crc kubenswrapper[4877]: E0128 16:38:57.272666 4877 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:57 crc kubenswrapper[4877]: I0128 16:38:57.272708 4877 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 28 16:38:57 crc kubenswrapper[4877]: E0128 16:38:57.272949 4877 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.34:6443: connect: connection refused" interval="200ms" Jan 28 16:38:57 crc kubenswrapper[4877]: I0128 16:38:57.332222 4877 status_manager.go:851] "Failed to get status for pod" podUID="3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:57 crc kubenswrapper[4877]: I0128 16:38:57.332670 4877 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:38:57 crc kubenswrapper[4877]: E0128 16:38:57.474082 4877 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.34:6443: connect: connection refused" interval="400ms" Jan 28 16:38:57 crc kubenswrapper[4877]: E0128 16:38:57.875035 4877 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.34:6443: connect: connection refused" interval="800ms" Jan 28 16:38:58 crc kubenswrapper[4877]: E0128 16:38:58.676461 4877 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.34:6443: connect: connection refused" interval="1.6s" Jan 28 16:39:00 crc kubenswrapper[4877]: E0128 16:39:00.278052 4877 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.34:6443: connect: connection refused" interval="3.2s" Jan 28 16:39:02 crc kubenswrapper[4877]: E0128 16:39:02.113257 4877 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.129.56.34:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188ef27f576553c4 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 16:38:52.344210372 +0000 UTC m=+235.902537290,LastTimestamp:2026-01-28 16:38:52.344210372 +0000 UTC m=+235.902537290,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 28 16:39:03 crc kubenswrapper[4877]: E0128 16:39:03.480181 4877 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.34:6443: connect: connection refused" interval="6.4s" Jan 28 16:39:04 crc kubenswrapper[4877]: I0128 16:39:04.330125 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:39:04 crc kubenswrapper[4877]: I0128 16:39:04.331132 4877 status_manager.go:851] "Failed to get status for pod" podUID="3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:39:04 crc kubenswrapper[4877]: I0128 16:39:04.331971 4877 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:39:04 crc kubenswrapper[4877]: I0128 16:39:04.353792 4877 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="930e5f2b-0289-4e2c-878b-85bd08af1049" Jan 28 16:39:04 crc kubenswrapper[4877]: I0128 16:39:04.353856 4877 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="930e5f2b-0289-4e2c-878b-85bd08af1049" Jan 28 16:39:04 crc kubenswrapper[4877]: E0128 16:39:04.354634 4877 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:39:04 crc kubenswrapper[4877]: I0128 16:39:04.355672 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:39:04 crc kubenswrapper[4877]: W0128 16:39:04.388342 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-1ce1de04465ecae675779bb0ddfb6ca58380b86c5df50f3961eda972ee4626ea WatchSource:0}: Error finding container 1ce1de04465ecae675779bb0ddfb6ca58380b86c5df50f3961eda972ee4626ea: Status 404 returned error can't find the container with id 1ce1de04465ecae675779bb0ddfb6ca58380b86c5df50f3961eda972ee4626ea Jan 28 16:39:05 crc kubenswrapper[4877]: I0128 16:39:05.381717 4877 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="0a8ffcfa76e2207f384f80f59e6499e37149a84d229ebb4ca5a11900c8304131" exitCode=0 Jan 28 16:39:05 crc kubenswrapper[4877]: I0128 16:39:05.381782 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"0a8ffcfa76e2207f384f80f59e6499e37149a84d229ebb4ca5a11900c8304131"} Jan 28 16:39:05 crc kubenswrapper[4877]: I0128 16:39:05.382135 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"1ce1de04465ecae675779bb0ddfb6ca58380b86c5df50f3961eda972ee4626ea"} Jan 28 16:39:05 crc kubenswrapper[4877]: I0128 16:39:05.382432 4877 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="930e5f2b-0289-4e2c-878b-85bd08af1049" Jan 28 16:39:05 crc kubenswrapper[4877]: I0128 16:39:05.382452 4877 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="930e5f2b-0289-4e2c-878b-85bd08af1049" Jan 28 16:39:05 crc kubenswrapper[4877]: E0128 16:39:05.383059 4877 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:39:05 crc kubenswrapper[4877]: I0128 16:39:05.383121 4877 status_manager.go:851] "Failed to get status for pod" podUID="3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:39:05 crc kubenswrapper[4877]: I0128 16:39:05.383912 4877 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.34:6443: connect: connection refused" Jan 28 16:39:06 crc kubenswrapper[4877]: I0128 16:39:06.392509 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 28 16:39:06 crc kubenswrapper[4877]: I0128 16:39:06.392800 4877 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a" exitCode=1 
Jan 28 16:39:06 crc kubenswrapper[4877]: I0128 16:39:06.392897 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a"} Jan 28 16:39:06 crc kubenswrapper[4877]: I0128 16:39:06.393425 4877 scope.go:117] "RemoveContainer" containerID="d5cff0e4b109b6006bf3c4bcdba8c51b7b4a11de25a0501dde0a0b2fca54d93a" Jan 28 16:39:06 crc kubenswrapper[4877]: I0128 16:39:06.398600 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"205c15c644bfafeeb60017404677ac425f94d30e6f4392300f2e796c62a9bc1d"} Jan 28 16:39:06 crc kubenswrapper[4877]: I0128 16:39:06.398646 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"21651d7ff3a55d234d3ad0c7ccfc4beee3b0f500c1cea2b96ea728e94a63a056"} Jan 28 16:39:06 crc kubenswrapper[4877]: I0128 16:39:06.398659 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"057ae5d236bca4d9c9fc024ea9516c4d505d7686fc9b658e1e472340d05f0231"} Jan 28 16:39:06 crc kubenswrapper[4877]: I0128 16:39:06.398711 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"3870ad655805e4cfa60ad7fad0e7ee0f2b88ae629951d9bea0641fdc91a9548c"} Jan 28 16:39:06 crc kubenswrapper[4877]: I0128 16:39:06.741460 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:39:07 crc kubenswrapper[4877]: I0128 16:39:07.407523 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 28 16:39:07 crc kubenswrapper[4877]: I0128 16:39:07.407693 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3c572c59fd2160955503b3cec47375d0a5783aeb72f7a2264856e5527d681a67"} Jan 28 16:39:07 crc kubenswrapper[4877]: I0128 16:39:07.411539 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"67c135febf40a85d17664a0991116c8891f5dc3c8870551f7b761cd8e152a0fb"} Jan 28 16:39:07 crc kubenswrapper[4877]: I0128 16:39:07.411777 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:39:07 crc kubenswrapper[4877]: I0128 16:39:07.411993 4877 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="930e5f2b-0289-4e2c-878b-85bd08af1049" Jan 28 16:39:07 crc kubenswrapper[4877]: I0128 16:39:07.412035 4877 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="930e5f2b-0289-4e2c-878b-85bd08af1049" Jan 28 16:39:07 crc kubenswrapper[4877]: I0128 16:39:07.717065 4877 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:39:07 crc kubenswrapper[4877]: I0128 16:39:07.721910 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:39:08 crc kubenswrapper[4877]: I0128 16:39:08.416849 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:39:09 crc kubenswrapper[4877]: I0128 16:39:09.356175 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:39:09 crc kubenswrapper[4877]: I0128 16:39:09.356586 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:39:09 crc kubenswrapper[4877]: I0128 16:39:09.365155 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:39:12 crc kubenswrapper[4877]: I0128 16:39:12.427402 4877 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:39:13 crc kubenswrapper[4877]: I0128 16:39:13.459003 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_71bb4a3aecc4ba5b26c4b7318770ce13/kube-apiserver-check-endpoints/0.log" Jan 28 16:39:13 crc kubenswrapper[4877]: I0128 16:39:13.461329 4877 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="67c135febf40a85d17664a0991116c8891f5dc3c8870551f7b761cd8e152a0fb" exitCode=255 Jan 28 16:39:13 crc kubenswrapper[4877]: I0128 16:39:13.461369 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"67c135febf40a85d17664a0991116c8891f5dc3c8870551f7b761cd8e152a0fb"} Jan 28 16:39:13 crc kubenswrapper[4877]: I0128 16:39:13.461807 4877 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="930e5f2b-0289-4e2c-878b-85bd08af1049" Jan 28 16:39:13 crc kubenswrapper[4877]: I0128 16:39:13.461835 4877 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="930e5f2b-0289-4e2c-878b-85bd08af1049" Jan 28 16:39:13 crc kubenswrapper[4877]: I0128 16:39:13.465106 4877 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="5f989784-8f11-4c1d-9944-e789e454c90e" Jan 28 16:39:13 crc kubenswrapper[4877]: I0128 16:39:13.465684 4877 scope.go:117] "RemoveContainer" containerID="67c135febf40a85d17664a0991116c8891f5dc3c8870551f7b761cd8e152a0fb" Jan 28 16:39:13 crc kubenswrapper[4877]: I0128 16:39:13.466848 4877 status_manager.go:308] "Container readiness changed before pod has synced" pod="openshift-kube-apiserver/kube-apiserver-crc" containerID="cri-o://3870ad655805e4cfa60ad7fad0e7ee0f2b88ae629951d9bea0641fdc91a9548c" Jan 28 16:39:13 crc kubenswrapper[4877]: I0128 16:39:13.466869 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:39:14 crc kubenswrapper[4877]: I0128 16:39:14.473934 4877 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_71bb4a3aecc4ba5b26c4b7318770ce13/kube-apiserver-check-endpoints/0.log" Jan 28 16:39:14 crc kubenswrapper[4877]: I0128 16:39:14.476881 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"d78fa6c275b216b21ad035067a0122cc25f9feaf2292f97d69f5a606bef73ba1"} Jan 28 16:39:14 crc kubenswrapper[4877]: I0128 16:39:14.477349 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:39:14 crc kubenswrapper[4877]: I0128 16:39:14.477447 4877 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="930e5f2b-0289-4e2c-878b-85bd08af1049" Jan 28 16:39:14 crc kubenswrapper[4877]: I0128 16:39:14.477534 4877 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="930e5f2b-0289-4e2c-878b-85bd08af1049" Jan 28 16:39:15 crc kubenswrapper[4877]: I0128 16:39:15.483862 4877 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="930e5f2b-0289-4e2c-878b-85bd08af1049" Jan 28 16:39:15 crc kubenswrapper[4877]: I0128 16:39:15.484250 4877 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="930e5f2b-0289-4e2c-878b-85bd08af1049" Jan 28 16:39:17 crc kubenswrapper[4877]: I0128 16:39:17.349698 4877 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="5f989784-8f11-4c1d-9944-e789e454c90e" Jan 28 16:39:21 crc kubenswrapper[4877]: I0128 16:39:21.851614 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 16:39:22 crc kubenswrapper[4877]: I0128 16:39:22.184119 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 28 16:39:22 crc kubenswrapper[4877]: I0128 16:39:22.497017 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 16:39:22 crc kubenswrapper[4877]: I0128 16:39:22.723316 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 28 16:39:22 crc kubenswrapper[4877]: I0128 16:39:22.882943 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 28 16:39:22 crc kubenswrapper[4877]: I0128 16:39:22.993006 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 16:39:23 crc kubenswrapper[4877]: I0128 16:39:23.672094 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 28 16:39:23 crc kubenswrapper[4877]: I0128 16:39:23.874954 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 28 16:39:24 crc kubenswrapper[4877]: I0128 16:39:24.509791 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 28 16:39:24 crc kubenswrapper[4877]: I0128 16:39:24.571745 4877 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-multus"/"multus-daemon-config" Jan 28 16:39:24 crc kubenswrapper[4877]: I0128 16:39:24.903085 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 28 16:39:25 crc kubenswrapper[4877]: I0128 16:39:25.100108 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 28 16:39:25 crc kubenswrapper[4877]: I0128 16:39:25.102442 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 28 16:39:25 crc kubenswrapper[4877]: I0128 16:39:25.149620 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 28 16:39:25 crc kubenswrapper[4877]: I0128 16:39:25.168968 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 28 16:39:25 crc kubenswrapper[4877]: I0128 16:39:25.199020 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 28 16:39:25 crc kubenswrapper[4877]: I0128 16:39:25.298746 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 28 16:39:25 crc kubenswrapper[4877]: I0128 16:39:25.404500 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 28 16:39:25 crc kubenswrapper[4877]: I0128 16:39:25.578972 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 16:39:25 crc kubenswrapper[4877]: I0128 16:39:25.752403 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 28 16:39:25 crc kubenswrapper[4877]: I0128 16:39:25.761037 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.000642 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.031642 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.185930 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.239273 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.241605 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.250085 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.311614 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.321530 4877 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-service-ca"/"kube-root-ca.crt" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.438551 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.523463 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.615499 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.745044 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.772053 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.797540 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.815271 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.839514 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.839836 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.861667 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.872417 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 28 16:39:26 crc kubenswrapper[4877]: I0128 16:39:26.922187 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.001243 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.006863 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.132143 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.186709 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.249376 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.270990 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 28 16:39:27 crc kubenswrapper[4877]: 
I0128 16:39:27.339967 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.361769 4877 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.410167 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.446088 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.504677 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.599788 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.710347 4877 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.719946 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.804763 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.823375 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.825717 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 28 16:39:27 crc kubenswrapper[4877]: I0128 16:39:27.961190 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 28 16:39:28 crc kubenswrapper[4877]: I0128 16:39:28.000675 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 28 16:39:28 crc kubenswrapper[4877]: I0128 16:39:28.032128 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 28 16:39:28 crc kubenswrapper[4877]: I0128 16:39:28.054902 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 28 16:39:28 crc kubenswrapper[4877]: I0128 16:39:28.083930 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 28 16:39:28 crc kubenswrapper[4877]: I0128 16:39:28.181157 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 28 16:39:28 crc kubenswrapper[4877]: I0128 16:39:28.257163 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 28 16:39:28 crc kubenswrapper[4877]: I0128 16:39:28.311617 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 28 16:39:28 crc 
Jan 28 16:39:28 crc kubenswrapper[4877]: I0128 16:39:28.388831 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Jan 28 16:39:28 crc kubenswrapper[4877]: I0128 16:39:28.389808 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Jan 28 16:39:28 crc kubenswrapper[4877]: I0128 16:39:28.437585 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Jan 28 16:39:28 crc kubenswrapper[4877]: I0128 16:39:28.709538 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Jan 28 16:39:28 crc kubenswrapper[4877]: I0128 16:39:28.824188 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Jan 28 16:39:29 crc kubenswrapper[4877]: I0128 16:39:29.101957 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Jan 28 16:39:29 crc kubenswrapper[4877]: I0128 16:39:29.248803 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Jan 28 16:39:29 crc kubenswrapper[4877]: I0128 16:39:29.382260 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Jan 28 16:39:29 crc kubenswrapper[4877]: I0128 16:39:29.410569 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Jan 28 16:39:29 crc kubenswrapper[4877]: I0128 16:39:29.519224 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Jan 28 16:39:29 crc kubenswrapper[4877]: I0128 16:39:29.566326 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Jan 28 16:39:29 crc kubenswrapper[4877]: I0128 16:39:29.598557 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Jan 28 16:39:29 crc kubenswrapper[4877]: I0128 16:39:29.641239 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Jan 28 16:39:29 crc kubenswrapper[4877]: I0128 16:39:29.736388 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Jan 28 16:39:29 crc kubenswrapper[4877]: I0128 16:39:29.929683 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Jan 28 16:39:29 crc kubenswrapper[4877]: I0128 16:39:29.953420 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Jan 28 16:39:29 crc kubenswrapper[4877]: I0128 16:39:29.965421 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Jan 28 16:39:29 crc kubenswrapper[4877]: I0128 16:39:29.979056 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.004244 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.058658 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.116464 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.125937 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.232960 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.338208 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.340409 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.352404 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.439330 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.443972 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.474349 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.478315 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.503827 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.510937 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.520176 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.550062 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.588428 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.603437 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.646655 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
object-"openshift-machine-api"/"kube-root-ca.crt" Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.655397 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.667579 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.678021 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.721044 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.758752 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.793940 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.819458 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.854614 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.888256 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 28 16:39:30 crc kubenswrapper[4877]: I0128 16:39:30.920719 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.013806 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.066278 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.080290 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.154882 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.183115 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.205906 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.227374 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.238274 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.266345 4877 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.316939 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.401017 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.410228 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.420596 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.453550 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.471492 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.532215 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.577452 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.603683 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.649051 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.685617 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.752996 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.786040 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.796871 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.848703 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.851590 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.871863 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 28 16:39:31 crc kubenswrapper[4877]: I0128 16:39:31.925536 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.005529 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.194243 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.224624 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.245352 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.280341 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.331761 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.398280 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.400165 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.502894 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.529991 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.560054 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.575502 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.652825 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.662239 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.673233 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.776614 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.816782 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.945168 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Jan 28 16:39:32 crc kubenswrapper[4877]: I0128 16:39:32.980515 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.029113 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.123898 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.162631 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.163648 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.185164 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.191792 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.271380 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.308533 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.329798 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.420914 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.561950 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.640640 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.678325 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.692963 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.805549 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.933382 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.936944 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Jan 28 16:39:33 crc kubenswrapper[4877]: I0128 16:39:33.992338 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.022729 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.033775 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.101032 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.189647 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.205055 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.276085 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.277712 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.307250 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.332110 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.393337 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.397928 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.450708 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.466918 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.586713 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.610307 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.656398 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.800419 4877 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.843259 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.885625 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Jan 28 16:39:34 crc kubenswrapper[4877]: I0128 16:39:34.977998 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Jan 28 16:39:35 crc kubenswrapper[4877]: I0128 16:39:35.022143 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Jan 28 16:39:35 crc kubenswrapper[4877]: I0128 16:39:35.102431 4877 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Jan 28 16:39:35 crc kubenswrapper[4877]: I0128 16:39:35.112549 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Jan 28 16:39:35 crc kubenswrapper[4877]: I0128 16:39:35.193658 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Jan 28 16:39:35 crc kubenswrapper[4877]: I0128 16:39:35.264704 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Jan 28 16:39:35 crc kubenswrapper[4877]: I0128 16:39:35.291398 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Jan 28 16:39:35 crc kubenswrapper[4877]: I0128 16:39:35.409715 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Jan 28 16:39:35 crc kubenswrapper[4877]: I0128 16:39:35.818052 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Jan 28 16:39:35 crc kubenswrapper[4877]: I0128 16:39:35.875045 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Jan 28 16:39:35 crc kubenswrapper[4877]: I0128 16:39:35.899354 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Jan 28 16:39:35 crc kubenswrapper[4877]: I0128 16:39:35.910291 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Jan 28 16:39:35 crc kubenswrapper[4877]: I0128 16:39:35.997462 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.001940 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.051835 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.061337 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.127494 4877 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.133940 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.212553 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
object-"openshift-controller-manager"/"serving-cert" Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.235568 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.264913 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.322996 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.417313 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.426091 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.464438 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.495564 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.516389 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.518636 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.520223 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.642421 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.684309 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.791665 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 28 16:39:36 crc kubenswrapper[4877]: I0128 16:39:36.921143 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 28 16:39:37 crc kubenswrapper[4877]: I0128 16:39:37.010374 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 28 16:39:37 crc kubenswrapper[4877]: I0128 16:39:37.045312 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 28 16:39:37 crc kubenswrapper[4877]: I0128 16:39:37.230551 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 28 16:39:37 crc kubenswrapper[4877]: I0128 16:39:37.251916 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 28 16:39:37 crc kubenswrapper[4877]: I0128 16:39:37.366903 4877 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 28 16:39:37 crc kubenswrapper[4877]: I0128 16:39:37.509391 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 28 16:39:37 crc kubenswrapper[4877]: I0128 16:39:37.711980 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 28 16:39:37 crc kubenswrapper[4877]: I0128 16:39:37.768976 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 28 16:39:37 crc kubenswrapper[4877]: I0128 16:39:37.778702 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 28 16:39:37 crc kubenswrapper[4877]: I0128 16:39:37.794585 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 28 16:39:38 crc kubenswrapper[4877]: I0128 16:39:38.319862 4877 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 28 16:39:38 crc kubenswrapper[4877]: I0128 16:39:38.321735 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=46.321712057 podStartE2EDuration="46.321712057s" podCreationTimestamp="2026-01-28 16:38:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:39:12.221560273 +0000 UTC m=+255.779887161" watchObservedRunningTime="2026-01-28 16:39:38.321712057 +0000 UTC m=+281.880038955" Jan 28 16:39:38 crc kubenswrapper[4877]: I0128 16:39:38.326783 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 16:39:38 crc kubenswrapper[4877]: I0128 16:39:38.326845 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 16:39:38 crc kubenswrapper[4877]: I0128 16:39:38.336743 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 16:39:38 crc kubenswrapper[4877]: I0128 16:39:38.368084 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=26.368055882 podStartE2EDuration="26.368055882s" podCreationTimestamp="2026-01-28 16:39:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:39:38.361303116 +0000 UTC m=+281.919630014" watchObservedRunningTime="2026-01-28 16:39:38.368055882 +0000 UTC m=+281.926382760" Jan 28 16:39:38 crc kubenswrapper[4877]: I0128 16:39:38.646381 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 28 16:39:38 crc kubenswrapper[4877]: I0128 16:39:38.780044 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 28 16:39:38 crc kubenswrapper[4877]: I0128 16:39:38.831859 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 28 16:39:38 crc kubenswrapper[4877]: I0128 16:39:38.859799 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 28 16:39:39 crc 
Jan 28 16:39:39 crc kubenswrapper[4877]: I0128 16:39:39.190897 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Jan 28 16:39:39 crc kubenswrapper[4877]: I0128 16:39:39.397897 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Jan 28 16:39:40 crc kubenswrapper[4877]: I0128 16:39:40.113597 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Jan 28 16:39:40 crc kubenswrapper[4877]: I0128 16:39:40.329168 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Jan 28 16:39:40 crc kubenswrapper[4877]: I0128 16:39:40.521062 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Jan 28 16:39:40 crc kubenswrapper[4877]: I0128 16:39:40.887160 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Jan 28 16:39:46 crc kubenswrapper[4877]: I0128 16:39:46.212252 4877 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Jan 28 16:39:46 crc kubenswrapper[4877]: I0128 16:39:46.212844 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://02d5c419b53a198d482d72da16c156375af65c60bd3795cd3813cdeb36b4ef5e" gracePeriod=5
Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.723591 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.724341 4877 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="02d5c419b53a198d482d72da16c156375af65c60bd3795cd3813cdeb36b4ef5e" exitCode=137
Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.785637 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.785753 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
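Editor's note: exitCode=137 in the PLEG entry above is the conventional 128+signal encoding; the startup-monitor container did not exit within its 5-second grace period (gracePeriod=5 two entries earlier) and was killed with SIGKILL (signal 9). A one-line illustration of the decoding (not kubelet code):

package main

import "fmt"

func main() {
	exitCode := 137
	if exitCode > 128 {
		// Prints: terminated by signal 9 (SIGKILL)
		fmt.Printf("terminated by signal %d (SIGKILL)\n", exitCode-128)
	}
}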
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.898753 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.899044 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.899139 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.899133 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.899217 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.899256 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.899279 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.899301 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.899408 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.899793 4877 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.899839 4877 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.899883 4877 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.899915 4877 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 28 16:39:51 crc kubenswrapper[4877]: I0128 16:39:51.913143 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:39:52 crc kubenswrapper[4877]: I0128 16:39:52.001220 4877 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 28 16:39:52 crc kubenswrapper[4877]: I0128 16:39:52.733949 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 28 16:39:52 crc kubenswrapper[4877]: I0128 16:39:52.734036 4877 scope.go:117] "RemoveContainer" containerID="02d5c419b53a198d482d72da16c156375af65c60bd3795cd3813cdeb36b4ef5e" Jan 28 16:39:52 crc kubenswrapper[4877]: I0128 16:39:52.734130 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 16:39:53 crc kubenswrapper[4877]: I0128 16:39:53.347225 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 28 16:39:53 crc kubenswrapper[4877]: I0128 16:39:53.347607 4877 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Jan 28 16:39:53 crc kubenswrapper[4877]: I0128 16:39:53.363923 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 28 16:39:53 crc kubenswrapper[4877]: I0128 16:39:53.363967 4877 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="e46295a7-d8ac-47dc-87b8-18558a803b7e" Jan 28 16:39:53 crc kubenswrapper[4877]: I0128 16:39:53.368299 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 28 16:39:53 crc kubenswrapper[4877]: I0128 16:39:53.368491 4877 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="e46295a7-d8ac-47dc-87b8-18558a803b7e" Jan 28 16:39:55 crc kubenswrapper[4877]: I0128 16:39:55.760804 4877 generic.go:334] "Generic (PLEG): container finished" podID="6a92f67e-224e-40a8-893d-edbe8dad2036" containerID="dbf9d3e7d5bbaf2e290fbb65cc7684e1af3f3a60eeffb7ae0b777c27af2ceadd" exitCode=0 Jan 28 16:39:55 crc kubenswrapper[4877]: I0128 16:39:55.760874 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" event={"ID":"6a92f67e-224e-40a8-893d-edbe8dad2036","Type":"ContainerDied","Data":"dbf9d3e7d5bbaf2e290fbb65cc7684e1af3f3a60eeffb7ae0b777c27af2ceadd"} Jan 28 16:39:55 crc kubenswrapper[4877]: I0128 16:39:55.761626 4877 scope.go:117] "RemoveContainer" containerID="dbf9d3e7d5bbaf2e290fbb65cc7684e1af3f3a60eeffb7ae0b777c27af2ceadd" Jan 28 16:39:56 crc kubenswrapper[4877]: I0128 16:39:56.771043 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" event={"ID":"6a92f67e-224e-40a8-893d-edbe8dad2036","Type":"ContainerStarted","Data":"f76f59a26b35bdff1adb4870f156ce2a0fa1038f5ae7b97fd3b42464dd6f51d8"} Jan 28 16:39:56 crc kubenswrapper[4877]: I0128 16:39:56.772065 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" Jan 28 16:39:56 crc kubenswrapper[4877]: I0128 16:39:56.774518 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" Jan 28 16:39:57 crc kubenswrapper[4877]: I0128 16:39:57.055443 4877 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.091257 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xjv5z"] Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.092033 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" podUID="fbd014e8-90c4-488c-88fa-b68493bebb36" 
containerName="controller-manager" containerID="cri-o://ff23b8f1648663f3538c113718ece3ec3294440a49c298a8dba1cca56c42f78b" gracePeriod=30 Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.266140 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764"] Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.266744 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764" podUID="1f30a6e5-e444-46ee-8756-cac33b69c05e" containerName="route-controller-manager" containerID="cri-o://e1e208c07a387a7901f5e879cf39db35b604e69e38aca6555ea6bc4f60c26109" gracePeriod=30 Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.494862 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.557018 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-config\") pod \"fbd014e8-90c4-488c-88fa-b68493bebb36\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.557145 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fbd014e8-90c4-488c-88fa-b68493bebb36-serving-cert\") pod \"fbd014e8-90c4-488c-88fa-b68493bebb36\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.557258 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-proxy-ca-bundles\") pod \"fbd014e8-90c4-488c-88fa-b68493bebb36\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.557345 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sl4g5\" (UniqueName: \"kubernetes.io/projected/fbd014e8-90c4-488c-88fa-b68493bebb36-kube-api-access-sl4g5\") pod \"fbd014e8-90c4-488c-88fa-b68493bebb36\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.557412 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-client-ca\") pod \"fbd014e8-90c4-488c-88fa-b68493bebb36\" (UID: \"fbd014e8-90c4-488c-88fa-b68493bebb36\") " Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.558023 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-client-ca" (OuterVolumeSpecName: "client-ca") pod "fbd014e8-90c4-488c-88fa-b68493bebb36" (UID: "fbd014e8-90c4-488c-88fa-b68493bebb36"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.558202 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-config" (OuterVolumeSpecName: "config") pod "fbd014e8-90c4-488c-88fa-b68493bebb36" (UID: "fbd014e8-90c4-488c-88fa-b68493bebb36"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.558987 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "fbd014e8-90c4-488c-88fa-b68493bebb36" (UID: "fbd014e8-90c4-488c-88fa-b68493bebb36"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.566871 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fbd014e8-90c4-488c-88fa-b68493bebb36-kube-api-access-sl4g5" (OuterVolumeSpecName: "kube-api-access-sl4g5") pod "fbd014e8-90c4-488c-88fa-b68493bebb36" (UID: "fbd014e8-90c4-488c-88fa-b68493bebb36"). InnerVolumeSpecName "kube-api-access-sl4g5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.568899 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbd014e8-90c4-488c-88fa-b68493bebb36-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "fbd014e8-90c4-488c-88fa-b68493bebb36" (UID: "fbd014e8-90c4-488c-88fa-b68493bebb36"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.609770 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.660069 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f30a6e5-e444-46ee-8756-cac33b69c05e-client-ca\") pod \"1f30a6e5-e444-46ee-8756-cac33b69c05e\" (UID: \"1f30a6e5-e444-46ee-8756-cac33b69c05e\") " Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.660202 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f30a6e5-e444-46ee-8756-cac33b69c05e-serving-cert\") pod \"1f30a6e5-e444-46ee-8756-cac33b69c05e\" (UID: \"1f30a6e5-e444-46ee-8756-cac33b69c05e\") " Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.660245 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2dwf\" (UniqueName: \"kubernetes.io/projected/1f30a6e5-e444-46ee-8756-cac33b69c05e-kube-api-access-c2dwf\") pod \"1f30a6e5-e444-46ee-8756-cac33b69c05e\" (UID: \"1f30a6e5-e444-46ee-8756-cac33b69c05e\") " Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.660285 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f30a6e5-e444-46ee-8756-cac33b69c05e-config\") pod \"1f30a6e5-e444-46ee-8756-cac33b69c05e\" (UID: \"1f30a6e5-e444-46ee-8756-cac33b69c05e\") " Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.661720 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f30a6e5-e444-46ee-8756-cac33b69c05e-config" (OuterVolumeSpecName: "config") pod "1f30a6e5-e444-46ee-8756-cac33b69c05e" (UID: "1f30a6e5-e444-46ee-8756-cac33b69c05e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.662365 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f30a6e5-e444-46ee-8756-cac33b69c05e-client-ca" (OuterVolumeSpecName: "client-ca") pod "1f30a6e5-e444-46ee-8756-cac33b69c05e" (UID: "1f30a6e5-e444-46ee-8756-cac33b69c05e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.662996 4877 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.663034 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sl4g5\" (UniqueName: \"kubernetes.io/projected/fbd014e8-90c4-488c-88fa-b68493bebb36-kube-api-access-sl4g5\") on node \"crc\" DevicePath \"\"" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.663048 4877 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.663057 4877 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1f30a6e5-e444-46ee-8756-cac33b69c05e-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.663067 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fbd014e8-90c4-488c-88fa-b68493bebb36-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.663076 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fbd014e8-90c4-488c-88fa-b68493bebb36-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.663085 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f30a6e5-e444-46ee-8756-cac33b69c05e-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.665851 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f30a6e5-e444-46ee-8756-cac33b69c05e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1f30a6e5-e444-46ee-8756-cac33b69c05e" (UID: "1f30a6e5-e444-46ee-8756-cac33b69c05e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.667175 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f30a6e5-e444-46ee-8756-cac33b69c05e-kube-api-access-c2dwf" (OuterVolumeSpecName: "kube-api-access-c2dwf") pod "1f30a6e5-e444-46ee-8756-cac33b69c05e" (UID: "1f30a6e5-e444-46ee-8756-cac33b69c05e"). InnerVolumeSpecName "kube-api-access-c2dwf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.765077 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f30a6e5-e444-46ee-8756-cac33b69c05e-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.765124 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2dwf\" (UniqueName: \"kubernetes.io/projected/1f30a6e5-e444-46ee-8756-cac33b69c05e-kube-api-access-c2dwf\") on node \"crc\" DevicePath \"\"" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.824523 4877 generic.go:334] "Generic (PLEG): container finished" podID="1f30a6e5-e444-46ee-8756-cac33b69c05e" containerID="e1e208c07a387a7901f5e879cf39db35b604e69e38aca6555ea6bc4f60c26109" exitCode=0 Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.824657 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.824802 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764" event={"ID":"1f30a6e5-e444-46ee-8756-cac33b69c05e","Type":"ContainerDied","Data":"e1e208c07a387a7901f5e879cf39db35b604e69e38aca6555ea6bc4f60c26109"} Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.824941 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764" event={"ID":"1f30a6e5-e444-46ee-8756-cac33b69c05e","Type":"ContainerDied","Data":"5dd9c095a930649db5cd45a4052aeea602fe2a1e09dd5d756bfe80909e9fe2dd"} Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.825007 4877 scope.go:117] "RemoveContainer" containerID="e1e208c07a387a7901f5e879cf39db35b604e69e38aca6555ea6bc4f60c26109" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.828951 4877 generic.go:334] "Generic (PLEG): container finished" podID="fbd014e8-90c4-488c-88fa-b68493bebb36" containerID="ff23b8f1648663f3538c113718ece3ec3294440a49c298a8dba1cca56c42f78b" exitCode=0 Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.829009 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" event={"ID":"fbd014e8-90c4-488c-88fa-b68493bebb36","Type":"ContainerDied","Data":"ff23b8f1648663f3538c113718ece3ec3294440a49c298a8dba1cca56c42f78b"} Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.829046 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" event={"ID":"fbd014e8-90c4-488c-88fa-b68493bebb36","Type":"ContainerDied","Data":"eceda3fa8e4496950b88d49b96597cc2fd173a5e7acc9700ddb2b6c04e8de563"} Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.829075 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xjv5z" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.851215 4877 scope.go:117] "RemoveContainer" containerID="e1e208c07a387a7901f5e879cf39db35b604e69e38aca6555ea6bc4f60c26109" Jan 28 16:40:01 crc kubenswrapper[4877]: E0128 16:40:01.851958 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1e208c07a387a7901f5e879cf39db35b604e69e38aca6555ea6bc4f60c26109\": container with ID starting with e1e208c07a387a7901f5e879cf39db35b604e69e38aca6555ea6bc4f60c26109 not found: ID does not exist" containerID="e1e208c07a387a7901f5e879cf39db35b604e69e38aca6555ea6bc4f60c26109" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.852067 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1e208c07a387a7901f5e879cf39db35b604e69e38aca6555ea6bc4f60c26109"} err="failed to get container status \"e1e208c07a387a7901f5e879cf39db35b604e69e38aca6555ea6bc4f60c26109\": rpc error: code = NotFound desc = could not find container \"e1e208c07a387a7901f5e879cf39db35b604e69e38aca6555ea6bc4f60c26109\": container with ID starting with e1e208c07a387a7901f5e879cf39db35b604e69e38aca6555ea6bc4f60c26109 not found: ID does not exist" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.852133 4877 scope.go:117] "RemoveContainer" containerID="ff23b8f1648663f3538c113718ece3ec3294440a49c298a8dba1cca56c42f78b" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.878410 4877 scope.go:117] "RemoveContainer" containerID="ff23b8f1648663f3538c113718ece3ec3294440a49c298a8dba1cca56c42f78b" Jan 28 16:40:01 crc kubenswrapper[4877]: E0128 16:40:01.879579 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff23b8f1648663f3538c113718ece3ec3294440a49c298a8dba1cca56c42f78b\": container with ID starting with ff23b8f1648663f3538c113718ece3ec3294440a49c298a8dba1cca56c42f78b not found: ID does not exist" containerID="ff23b8f1648663f3538c113718ece3ec3294440a49c298a8dba1cca56c42f78b" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.879630 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff23b8f1648663f3538c113718ece3ec3294440a49c298a8dba1cca56c42f78b"} err="failed to get container status \"ff23b8f1648663f3538c113718ece3ec3294440a49c298a8dba1cca56c42f78b\": rpc error: code = NotFound desc = could not find container \"ff23b8f1648663f3538c113718ece3ec3294440a49c298a8dba1cca56c42f78b\": container with ID starting with ff23b8f1648663f3538c113718ece3ec3294440a49c298a8dba1cca56c42f78b not found: ID does not exist" Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.885282 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764"] Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.894400 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ck764"] Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.901731 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xjv5z"] Jan 28 16:40:01 crc kubenswrapper[4877]: I0128 16:40:01.908246 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xjv5z"] Jan 28 16:40:03 crc 
kubenswrapper[4877]: I0128 16:40:03.038931 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-wm2fs"] Jan 28 16:40:03 crc kubenswrapper[4877]: E0128 16:40:03.039793 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.039825 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 28 16:40:03 crc kubenswrapper[4877]: E0128 16:40:03.039857 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbd014e8-90c4-488c-88fa-b68493bebb36" containerName="controller-manager" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.039875 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbd014e8-90c4-488c-88fa-b68493bebb36" containerName="controller-manager" Jan 28 16:40:03 crc kubenswrapper[4877]: E0128 16:40:03.039905 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f30a6e5-e444-46ee-8756-cac33b69c05e" containerName="route-controller-manager" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.039924 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f30a6e5-e444-46ee-8756-cac33b69c05e" containerName="route-controller-manager" Jan 28 16:40:03 crc kubenswrapper[4877]: E0128 16:40:03.039943 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8" containerName="installer" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.039959 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8" containerName="installer" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.040200 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbd014e8-90c4-488c-88fa-b68493bebb36" containerName="controller-manager" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.040243 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f30a6e5-e444-46ee-8756-cac33b69c05e" containerName="route-controller-manager" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.040270 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.040291 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a2b17ce-acb8-4c3d-830c-5fb8b521a5d8" containerName="installer" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.041096 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.047084 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.047141 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.047878 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.048152 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.047878 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.048006 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.053772 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r"] Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.055244 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.062451 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r"] Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.063874 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.064422 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.064676 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.064987 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.065042 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.066056 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.077457 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.092354 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jb7ln\" (UniqueName: \"kubernetes.io/projected/59c5ea7f-f3a1-4fa3-882c-5690f3af3026-kube-api-access-jb7ln\") pod \"route-controller-manager-77c8bf88f9-l4t7r\" (UID: \"59c5ea7f-f3a1-4fa3-882c-5690f3af3026\") " 
pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.092937 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59c5ea7f-f3a1-4fa3-882c-5690f3af3026-config\") pod \"route-controller-manager-77c8bf88f9-l4t7r\" (UID: \"59c5ea7f-f3a1-4fa3-882c-5690f3af3026\") " pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.092994 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-client-ca\") pod \"controller-manager-fb864b4d-wm2fs\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.093086 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdkts\" (UniqueName: \"kubernetes.io/projected/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-kube-api-access-hdkts\") pod \"controller-manager-fb864b4d-wm2fs\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.093187 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/59c5ea7f-f3a1-4fa3-882c-5690f3af3026-client-ca\") pod \"route-controller-manager-77c8bf88f9-l4t7r\" (UID: \"59c5ea7f-f3a1-4fa3-882c-5690f3af3026\") " pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.093246 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-config\") pod \"controller-manager-fb864b4d-wm2fs\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.093279 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-serving-cert\") pod \"controller-manager-fb864b4d-wm2fs\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.093323 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59c5ea7f-f3a1-4fa3-882c-5690f3af3026-serving-cert\") pod \"route-controller-manager-77c8bf88f9-l4t7r\" (UID: \"59c5ea7f-f3a1-4fa3-882c-5690f3af3026\") " pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.093553 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-proxy-ca-bundles\") pod \"controller-manager-fb864b4d-wm2fs\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " 
pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.100017 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-wm2fs"] Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.194664 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdkts\" (UniqueName: \"kubernetes.io/projected/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-kube-api-access-hdkts\") pod \"controller-manager-fb864b4d-wm2fs\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.194734 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/59c5ea7f-f3a1-4fa3-882c-5690f3af3026-client-ca\") pod \"route-controller-manager-77c8bf88f9-l4t7r\" (UID: \"59c5ea7f-f3a1-4fa3-882c-5690f3af3026\") " pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.194783 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-config\") pod \"controller-manager-fb864b4d-wm2fs\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.194814 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-serving-cert\") pod \"controller-manager-fb864b4d-wm2fs\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.194841 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59c5ea7f-f3a1-4fa3-882c-5690f3af3026-serving-cert\") pod \"route-controller-manager-77c8bf88f9-l4t7r\" (UID: \"59c5ea7f-f3a1-4fa3-882c-5690f3af3026\") " pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.194873 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-proxy-ca-bundles\") pod \"controller-manager-fb864b4d-wm2fs\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.194915 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jb7ln\" (UniqueName: \"kubernetes.io/projected/59c5ea7f-f3a1-4fa3-882c-5690f3af3026-kube-api-access-jb7ln\") pod \"route-controller-manager-77c8bf88f9-l4t7r\" (UID: \"59c5ea7f-f3a1-4fa3-882c-5690f3af3026\") " pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.194939 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59c5ea7f-f3a1-4fa3-882c-5690f3af3026-config\") pod \"route-controller-manager-77c8bf88f9-l4t7r\" (UID: 
\"59c5ea7f-f3a1-4fa3-882c-5690f3af3026\") " pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.194965 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-client-ca\") pod \"controller-manager-fb864b4d-wm2fs\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.196468 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-client-ca\") pod \"controller-manager-fb864b4d-wm2fs\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.197428 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-proxy-ca-bundles\") pod \"controller-manager-fb864b4d-wm2fs\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.197999 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-config\") pod \"controller-manager-fb864b4d-wm2fs\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.198778 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59c5ea7f-f3a1-4fa3-882c-5690f3af3026-config\") pod \"route-controller-manager-77c8bf88f9-l4t7r\" (UID: \"59c5ea7f-f3a1-4fa3-882c-5690f3af3026\") " pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.199390 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/59c5ea7f-f3a1-4fa3-882c-5690f3af3026-client-ca\") pod \"route-controller-manager-77c8bf88f9-l4t7r\" (UID: \"59c5ea7f-f3a1-4fa3-882c-5690f3af3026\") " pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.200735 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59c5ea7f-f3a1-4fa3-882c-5690f3af3026-serving-cert\") pod \"route-controller-manager-77c8bf88f9-l4t7r\" (UID: \"59c5ea7f-f3a1-4fa3-882c-5690f3af3026\") " pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.209271 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-serving-cert\") pod \"controller-manager-fb864b4d-wm2fs\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.212139 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-hdkts\" (UniqueName: \"kubernetes.io/projected/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-kube-api-access-hdkts\") pod \"controller-manager-fb864b4d-wm2fs\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.219801 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jb7ln\" (UniqueName: \"kubernetes.io/projected/59c5ea7f-f3a1-4fa3-882c-5690f3af3026-kube-api-access-jb7ln\") pod \"route-controller-manager-77c8bf88f9-l4t7r\" (UID: \"59c5ea7f-f3a1-4fa3-882c-5690f3af3026\") " pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.343236 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f30a6e5-e444-46ee-8756-cac33b69c05e" path="/var/lib/kubelet/pods/1f30a6e5-e444-46ee-8756-cac33b69c05e/volumes" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.344362 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fbd014e8-90c4-488c-88fa-b68493bebb36" path="/var/lib/kubelet/pods/fbd014e8-90c4-488c-88fa-b68493bebb36/volumes" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.421045 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.432852 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.707077 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r"] Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.755119 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-wm2fs"] Jan 28 16:40:03 crc kubenswrapper[4877]: W0128 16:40:03.766231 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1e1bea3e_cdc1_4318_8329_e0b36fd1d306.slice/crio-48ea9d72aa2e995525dc516ea194b88bcabd67b0154b27f632f3d494edc3722d WatchSource:0}: Error finding container 48ea9d72aa2e995525dc516ea194b88bcabd67b0154b27f632f3d494edc3722d: Status 404 returned error can't find the container with id 48ea9d72aa2e995525dc516ea194b88bcabd67b0154b27f632f3d494edc3722d Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.846056 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" event={"ID":"59c5ea7f-f3a1-4fa3-882c-5690f3af3026","Type":"ContainerStarted","Data":"1679f32a7e5e8a381623de155c4352ebfd5f18d4170407b7dcf3985764fd28d6"} Jan 28 16:40:03 crc kubenswrapper[4877]: I0128 16:40:03.847115 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" event={"ID":"1e1bea3e-cdc1-4318-8329-e0b36fd1d306","Type":"ContainerStarted","Data":"48ea9d72aa2e995525dc516ea194b88bcabd67b0154b27f632f3d494edc3722d"} Jan 28 16:40:04 crc kubenswrapper[4877]: I0128 16:40:04.854167 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" 
event={"ID":"59c5ea7f-f3a1-4fa3-882c-5690f3af3026","Type":"ContainerStarted","Data":"a4fe44a180bd3f1f498f7a22c8dc70f20e28f6418177a04e7de593eff768dcb0"} Jan 28 16:40:04 crc kubenswrapper[4877]: I0128 16:40:04.855901 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" Jan 28 16:40:04 crc kubenswrapper[4877]: I0128 16:40:04.856370 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" event={"ID":"1e1bea3e-cdc1-4318-8329-e0b36fd1d306","Type":"ContainerStarted","Data":"1dd17f3586d843f7fa20a7668815e1e2b251313b90c30c37b5eec6299ff8e03a"} Jan 28 16:40:04 crc kubenswrapper[4877]: I0128 16:40:04.856948 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:04 crc kubenswrapper[4877]: I0128 16:40:04.860593 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" Jan 28 16:40:04 crc kubenswrapper[4877]: I0128 16:40:04.867591 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:40:04 crc kubenswrapper[4877]: I0128 16:40:04.903097 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" podStartSLOduration=3.903065659 podStartE2EDuration="3.903065659s" podCreationTimestamp="2026-01-28 16:40:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:40:04.87765408 +0000 UTC m=+308.435981008" watchObservedRunningTime="2026-01-28 16:40:04.903065659 +0000 UTC m=+308.461392587" Jan 28 16:40:04 crc kubenswrapper[4877]: I0128 16:40:04.904183 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" podStartSLOduration=3.904171308 podStartE2EDuration="3.904171308s" podCreationTimestamp="2026-01-28 16:40:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:40:04.899562382 +0000 UTC m=+308.457889270" watchObservedRunningTime="2026-01-28 16:40:04.904171308 +0000 UTC m=+308.462498236" Jan 28 16:40:15 crc kubenswrapper[4877]: I0128 16:40:15.547261 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hx6kq"] Jan 28 16:40:15 crc kubenswrapper[4877]: I0128 16:40:15.548013 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hx6kq" podUID="cad628ad-2502-408b-ab7a-4a5be2d1637f" containerName="registry-server" containerID="cri-o://07d4bb41297d9db30f69d5e358773aa5c35c6eab7dd4240deb4ecf4f9918e6db" gracePeriod=2 Jan 28 16:40:15 crc kubenswrapper[4877]: I0128 16:40:15.939971 4877 generic.go:334] "Generic (PLEG): container finished" podID="cad628ad-2502-408b-ab7a-4a5be2d1637f" containerID="07d4bb41297d9db30f69d5e358773aa5c35c6eab7dd4240deb4ecf4f9918e6db" exitCode=0 Jan 28 16:40:15 crc kubenswrapper[4877]: I0128 16:40:15.940065 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hx6kq" 
event={"ID":"cad628ad-2502-408b-ab7a-4a5be2d1637f","Type":"ContainerDied","Data":"07d4bb41297d9db30f69d5e358773aa5c35c6eab7dd4240deb4ecf4f9918e6db"} Jan 28 16:40:16 crc kubenswrapper[4877]: I0128 16:40:16.051011 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hx6kq" Jan 28 16:40:16 crc kubenswrapper[4877]: I0128 16:40:16.099135 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cad628ad-2502-408b-ab7a-4a5be2d1637f-catalog-content\") pod \"cad628ad-2502-408b-ab7a-4a5be2d1637f\" (UID: \"cad628ad-2502-408b-ab7a-4a5be2d1637f\") " Jan 28 16:40:16 crc kubenswrapper[4877]: I0128 16:40:16.099239 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cad628ad-2502-408b-ab7a-4a5be2d1637f-utilities\") pod \"cad628ad-2502-408b-ab7a-4a5be2d1637f\" (UID: \"cad628ad-2502-408b-ab7a-4a5be2d1637f\") " Jan 28 16:40:16 crc kubenswrapper[4877]: I0128 16:40:16.099284 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tth6\" (UniqueName: \"kubernetes.io/projected/cad628ad-2502-408b-ab7a-4a5be2d1637f-kube-api-access-6tth6\") pod \"cad628ad-2502-408b-ab7a-4a5be2d1637f\" (UID: \"cad628ad-2502-408b-ab7a-4a5be2d1637f\") " Jan 28 16:40:16 crc kubenswrapper[4877]: I0128 16:40:16.100263 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cad628ad-2502-408b-ab7a-4a5be2d1637f-utilities" (OuterVolumeSpecName: "utilities") pod "cad628ad-2502-408b-ab7a-4a5be2d1637f" (UID: "cad628ad-2502-408b-ab7a-4a5be2d1637f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:40:16 crc kubenswrapper[4877]: I0128 16:40:16.105209 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cad628ad-2502-408b-ab7a-4a5be2d1637f-kube-api-access-6tth6" (OuterVolumeSpecName: "kube-api-access-6tth6") pod "cad628ad-2502-408b-ab7a-4a5be2d1637f" (UID: "cad628ad-2502-408b-ab7a-4a5be2d1637f"). InnerVolumeSpecName "kube-api-access-6tth6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:40:16 crc kubenswrapper[4877]: I0128 16:40:16.122825 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cad628ad-2502-408b-ab7a-4a5be2d1637f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cad628ad-2502-408b-ab7a-4a5be2d1637f" (UID: "cad628ad-2502-408b-ab7a-4a5be2d1637f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:40:16 crc kubenswrapper[4877]: I0128 16:40:16.201148 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cad628ad-2502-408b-ab7a-4a5be2d1637f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 16:40:16 crc kubenswrapper[4877]: I0128 16:40:16.201193 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cad628ad-2502-408b-ab7a-4a5be2d1637f-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 16:40:16 crc kubenswrapper[4877]: I0128 16:40:16.201208 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tth6\" (UniqueName: \"kubernetes.io/projected/cad628ad-2502-408b-ab7a-4a5be2d1637f-kube-api-access-6tth6\") on node \"crc\" DevicePath \"\"" Jan 28 16:40:16 crc kubenswrapper[4877]: I0128 16:40:16.953329 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hx6kq" event={"ID":"cad628ad-2502-408b-ab7a-4a5be2d1637f","Type":"ContainerDied","Data":"c8cc4035ce257bb0d479e24f813d3f36a76ac2386d4e6afcdd14483be805a8e9"} Jan 28 16:40:16 crc kubenswrapper[4877]: I0128 16:40:16.953470 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hx6kq" Jan 28 16:40:16 crc kubenswrapper[4877]: I0128 16:40:16.953955 4877 scope.go:117] "RemoveContainer" containerID="07d4bb41297d9db30f69d5e358773aa5c35c6eab7dd4240deb4ecf4f9918e6db" Jan 28 16:40:16 crc kubenswrapper[4877]: I0128 16:40:16.978883 4877 scope.go:117] "RemoveContainer" containerID="9d6a2ce44e8a3d3efc557ea24ba53c1ee1124d4e43d2122598cee4aeeaf6736b" Jan 28 16:40:17 crc kubenswrapper[4877]: I0128 16:40:17.014948 4877 scope.go:117] "RemoveContainer" containerID="9feffce29fbf8a73bbfee10e9cbee0563c9a9b3618acc6d08257c6283b2efd1a" Jan 28 16:40:17 crc kubenswrapper[4877]: I0128 16:40:17.020651 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hx6kq"] Jan 28 16:40:17 crc kubenswrapper[4877]: I0128 16:40:17.028788 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hx6kq"] Jan 28 16:40:17 crc kubenswrapper[4877]: I0128 16:40:17.348078 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cad628ad-2502-408b-ab7a-4a5be2d1637f" path="/var/lib/kubelet/pods/cad628ad-2502-408b-ab7a-4a5be2d1637f/volumes" Jan 28 16:40:37 crc kubenswrapper[4877]: I0128 16:40:37.076870 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:40:37 crc kubenswrapper[4877]: I0128 16:40:37.078701 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:40:54 crc kubenswrapper[4877]: I0128 16:40:54.809195 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-62l6g"] Jan 28 16:40:54 crc kubenswrapper[4877]: E0128 16:40:54.809908 4877 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="cad628ad-2502-408b-ab7a-4a5be2d1637f" containerName="extract-utilities" Jan 28 16:40:54 crc kubenswrapper[4877]: I0128 16:40:54.809920 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="cad628ad-2502-408b-ab7a-4a5be2d1637f" containerName="extract-utilities" Jan 28 16:40:54 crc kubenswrapper[4877]: E0128 16:40:54.809929 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cad628ad-2502-408b-ab7a-4a5be2d1637f" containerName="registry-server" Jan 28 16:40:54 crc kubenswrapper[4877]: I0128 16:40:54.809936 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="cad628ad-2502-408b-ab7a-4a5be2d1637f" containerName="registry-server" Jan 28 16:40:54 crc kubenswrapper[4877]: E0128 16:40:54.809958 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cad628ad-2502-408b-ab7a-4a5be2d1637f" containerName="extract-content" Jan 28 16:40:54 crc kubenswrapper[4877]: I0128 16:40:54.809965 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="cad628ad-2502-408b-ab7a-4a5be2d1637f" containerName="extract-content" Jan 28 16:40:54 crc kubenswrapper[4877]: I0128 16:40:54.810052 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="cad628ad-2502-408b-ab7a-4a5be2d1637f" containerName="registry-server" Jan 28 16:40:54 crc kubenswrapper[4877]: I0128 16:40:54.810440 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:54 crc kubenswrapper[4877]: I0128 16:40:54.839908 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-62l6g"] Jan 28 16:40:54 crc kubenswrapper[4877]: I0128 16:40:54.961105 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/652e294d-efe3-4f93-828f-c6cacf3d7166-ca-trust-extracted\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:54 crc kubenswrapper[4877]: I0128 16:40:54.961148 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/652e294d-efe3-4f93-828f-c6cacf3d7166-registry-tls\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:54 crc kubenswrapper[4877]: I0128 16:40:54.961178 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/652e294d-efe3-4f93-828f-c6cacf3d7166-bound-sa-token\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:54 crc kubenswrapper[4877]: I0128 16:40:54.961198 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/652e294d-efe3-4f93-828f-c6cacf3d7166-registry-certificates\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:54 crc kubenswrapper[4877]: I0128 16:40:54.961224 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-pt6ns\" (UniqueName: \"kubernetes.io/projected/652e294d-efe3-4f93-828f-c6cacf3d7166-kube-api-access-pt6ns\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:54 crc kubenswrapper[4877]: I0128 16:40:54.961251 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/652e294d-efe3-4f93-828f-c6cacf3d7166-trusted-ca\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:54 crc kubenswrapper[4877]: I0128 16:40:54.961288 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/652e294d-efe3-4f93-828f-c6cacf3d7166-installation-pull-secrets\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:54 crc kubenswrapper[4877]: I0128 16:40:54.961406 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:54 crc kubenswrapper[4877]: I0128 16:40:54.993646 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:55 crc kubenswrapper[4877]: I0128 16:40:55.063162 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pt6ns\" (UniqueName: \"kubernetes.io/projected/652e294d-efe3-4f93-828f-c6cacf3d7166-kube-api-access-pt6ns\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:55 crc kubenswrapper[4877]: I0128 16:40:55.063235 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/652e294d-efe3-4f93-828f-c6cacf3d7166-trusted-ca\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:55 crc kubenswrapper[4877]: I0128 16:40:55.063285 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/652e294d-efe3-4f93-828f-c6cacf3d7166-installation-pull-secrets\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:55 crc kubenswrapper[4877]: I0128 16:40:55.063343 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/652e294d-efe3-4f93-828f-c6cacf3d7166-ca-trust-extracted\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:55 crc kubenswrapper[4877]: I0128 16:40:55.063370 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/652e294d-efe3-4f93-828f-c6cacf3d7166-registry-tls\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:55 crc kubenswrapper[4877]: I0128 16:40:55.063402 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/652e294d-efe3-4f93-828f-c6cacf3d7166-bound-sa-token\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:55 crc kubenswrapper[4877]: I0128 16:40:55.063432 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/652e294d-efe3-4f93-828f-c6cacf3d7166-registry-certificates\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:55 crc kubenswrapper[4877]: I0128 16:40:55.064412 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/652e294d-efe3-4f93-828f-c6cacf3d7166-ca-trust-extracted\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:55 crc kubenswrapper[4877]: I0128 16:40:55.065385 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/652e294d-efe3-4f93-828f-c6cacf3d7166-registry-certificates\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:55 crc kubenswrapper[4877]: I0128 16:40:55.068839 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/652e294d-efe3-4f93-828f-c6cacf3d7166-trusted-ca\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:55 crc kubenswrapper[4877]: I0128 16:40:55.072759 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/652e294d-efe3-4f93-828f-c6cacf3d7166-registry-tls\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:55 crc kubenswrapper[4877]: I0128 16:40:55.079804 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/652e294d-efe3-4f93-828f-c6cacf3d7166-installation-pull-secrets\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:55 crc 
kubenswrapper[4877]: I0128 16:40:55.087248 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pt6ns\" (UniqueName: \"kubernetes.io/projected/652e294d-efe3-4f93-828f-c6cacf3d7166-kube-api-access-pt6ns\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:55 crc kubenswrapper[4877]: I0128 16:40:55.087392 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/652e294d-efe3-4f93-828f-c6cacf3d7166-bound-sa-token\") pod \"image-registry-66df7c8f76-62l6g\" (UID: \"652e294d-efe3-4f93-828f-c6cacf3d7166\") " pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:55 crc kubenswrapper[4877]: I0128 16:40:55.133354 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:55 crc kubenswrapper[4877]: I0128 16:40:55.597900 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-62l6g"] Jan 28 16:40:56 crc kubenswrapper[4877]: I0128 16:40:56.231636 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" event={"ID":"652e294d-efe3-4f93-828f-c6cacf3d7166","Type":"ContainerStarted","Data":"49df7ca445d7b60569ecc5d62ac523552054bb19a06945749f439064c2a11fa8"} Jan 28 16:40:56 crc kubenswrapper[4877]: I0128 16:40:56.232063 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" event={"ID":"652e294d-efe3-4f93-828f-c6cacf3d7166","Type":"ContainerStarted","Data":"0b179b69723f92a8a728a312c19d18ebb55e68f3d2f20a8e63c205fbd7ce4d6d"} Jan 28 16:40:56 crc kubenswrapper[4877]: I0128 16:40:56.232089 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:40:56 crc kubenswrapper[4877]: I0128 16:40:56.253097 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" podStartSLOduration=2.253078525 podStartE2EDuration="2.253078525s" podCreationTimestamp="2026-01-28 16:40:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:40:56.250942326 +0000 UTC m=+359.809269214" watchObservedRunningTime="2026-01-28 16:40:56.253078525 +0000 UTC m=+359.811405413" Jan 28 16:41:01 crc kubenswrapper[4877]: I0128 16:41:01.499238 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-wm2fs"] Jan 28 16:41:01 crc kubenswrapper[4877]: I0128 16:41:01.500694 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" podUID="1e1bea3e-cdc1-4318-8329-e0b36fd1d306" containerName="controller-manager" containerID="cri-o://1dd17f3586d843f7fa20a7668815e1e2b251313b90c30c37b5eec6299ff8e03a" gracePeriod=30 Jan 28 16:41:01 crc kubenswrapper[4877]: I0128 16:41:01.980315 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.076548 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-config\") pod \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.076687 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-serving-cert\") pod \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.076738 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdkts\" (UniqueName: \"kubernetes.io/projected/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-kube-api-access-hdkts\") pod \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.076757 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-proxy-ca-bundles\") pod \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.076834 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-client-ca\") pod \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\" (UID: \"1e1bea3e-cdc1-4318-8329-e0b36fd1d306\") " Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.077950 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "1e1bea3e-cdc1-4318-8329-e0b36fd1d306" (UID: "1e1bea3e-cdc1-4318-8329-e0b36fd1d306"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.077961 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-client-ca" (OuterVolumeSpecName: "client-ca") pod "1e1bea3e-cdc1-4318-8329-e0b36fd1d306" (UID: "1e1bea3e-cdc1-4318-8329-e0b36fd1d306"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.078462 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-config" (OuterVolumeSpecName: "config") pod "1e1bea3e-cdc1-4318-8329-e0b36fd1d306" (UID: "1e1bea3e-cdc1-4318-8329-e0b36fd1d306"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.084666 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1e1bea3e-cdc1-4318-8329-e0b36fd1d306" (UID: "1e1bea3e-cdc1-4318-8329-e0b36fd1d306"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.085776 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-kube-api-access-hdkts" (OuterVolumeSpecName: "kube-api-access-hdkts") pod "1e1bea3e-cdc1-4318-8329-e0b36fd1d306" (UID: "1e1bea3e-cdc1-4318-8329-e0b36fd1d306"). InnerVolumeSpecName "kube-api-access-hdkts". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.178698 4877 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.178760 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdkts\" (UniqueName: \"kubernetes.io/projected/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-kube-api-access-hdkts\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.178782 4877 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.178796 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.178810 4877 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1e1bea3e-cdc1-4318-8329-e0b36fd1d306-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.272944 4877 generic.go:334] "Generic (PLEG): container finished" podID="1e1bea3e-cdc1-4318-8329-e0b36fd1d306" containerID="1dd17f3586d843f7fa20a7668815e1e2b251313b90c30c37b5eec6299ff8e03a" exitCode=0 Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.273015 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.273069 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" event={"ID":"1e1bea3e-cdc1-4318-8329-e0b36fd1d306","Type":"ContainerDied","Data":"1dd17f3586d843f7fa20a7668815e1e2b251313b90c30c37b5eec6299ff8e03a"} Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.273698 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-fb864b4d-wm2fs" event={"ID":"1e1bea3e-cdc1-4318-8329-e0b36fd1d306","Type":"ContainerDied","Data":"48ea9d72aa2e995525dc516ea194b88bcabd67b0154b27f632f3d494edc3722d"} Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.273729 4877 scope.go:117] "RemoveContainer" containerID="1dd17f3586d843f7fa20a7668815e1e2b251313b90c30c37b5eec6299ff8e03a" Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.307524 4877 scope.go:117] "RemoveContainer" containerID="1dd17f3586d843f7fa20a7668815e1e2b251313b90c30c37b5eec6299ff8e03a" Jan 28 16:41:02 crc kubenswrapper[4877]: E0128 16:41:02.308661 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1dd17f3586d843f7fa20a7668815e1e2b251313b90c30c37b5eec6299ff8e03a\": container with ID starting with 1dd17f3586d843f7fa20a7668815e1e2b251313b90c30c37b5eec6299ff8e03a not found: ID does not exist" containerID="1dd17f3586d843f7fa20a7668815e1e2b251313b90c30c37b5eec6299ff8e03a" Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.308767 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dd17f3586d843f7fa20a7668815e1e2b251313b90c30c37b5eec6299ff8e03a"} err="failed to get container status \"1dd17f3586d843f7fa20a7668815e1e2b251313b90c30c37b5eec6299ff8e03a\": rpc error: code = NotFound desc = could not find container \"1dd17f3586d843f7fa20a7668815e1e2b251313b90c30c37b5eec6299ff8e03a\": container with ID starting with 1dd17f3586d843f7fa20a7668815e1e2b251313b90c30c37b5eec6299ff8e03a not found: ID does not exist" Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.313022 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-wm2fs"] Jan 28 16:41:02 crc kubenswrapper[4877]: I0128 16:41:02.316304 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-fb864b4d-wm2fs"] Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.080424 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-678cc9d6c4-99z4w"] Jan 28 16:41:03 crc kubenswrapper[4877]: E0128 16:41:03.081463 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e1bea3e-cdc1-4318-8329-e0b36fd1d306" containerName="controller-manager" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.081515 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e1bea3e-cdc1-4318-8329-e0b36fd1d306" containerName="controller-manager" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.081724 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e1bea3e-cdc1-4318-8329-e0b36fd1d306" containerName="controller-manager" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.082402 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.086641 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.086642 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.087564 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.087726 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.090309 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.090890 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.117618 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.120006 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-678cc9d6c4-99z4w"] Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.198361 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/46eee1f6-965e-4d6d-a520-77b2f472b164-proxy-ca-bundles\") pod \"controller-manager-678cc9d6c4-99z4w\" (UID: \"46eee1f6-965e-4d6d-a520-77b2f472b164\") " pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.198431 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/46eee1f6-965e-4d6d-a520-77b2f472b164-client-ca\") pod \"controller-manager-678cc9d6c4-99z4w\" (UID: \"46eee1f6-965e-4d6d-a520-77b2f472b164\") " pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.198526 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46eee1f6-965e-4d6d-a520-77b2f472b164-serving-cert\") pod \"controller-manager-678cc9d6c4-99z4w\" (UID: \"46eee1f6-965e-4d6d-a520-77b2f472b164\") " pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.198563 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46eee1f6-965e-4d6d-a520-77b2f472b164-config\") pod \"controller-manager-678cc9d6c4-99z4w\" (UID: \"46eee1f6-965e-4d6d-a520-77b2f472b164\") " pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.198703 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gv6bn\" (UniqueName: 
\"kubernetes.io/projected/46eee1f6-965e-4d6d-a520-77b2f472b164-kube-api-access-gv6bn\") pod \"controller-manager-678cc9d6c4-99z4w\" (UID: \"46eee1f6-965e-4d6d-a520-77b2f472b164\") " pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.300598 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/46eee1f6-965e-4d6d-a520-77b2f472b164-proxy-ca-bundles\") pod \"controller-manager-678cc9d6c4-99z4w\" (UID: \"46eee1f6-965e-4d6d-a520-77b2f472b164\") " pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.300690 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/46eee1f6-965e-4d6d-a520-77b2f472b164-client-ca\") pod \"controller-manager-678cc9d6c4-99z4w\" (UID: \"46eee1f6-965e-4d6d-a520-77b2f472b164\") " pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.300774 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46eee1f6-965e-4d6d-a520-77b2f472b164-serving-cert\") pod \"controller-manager-678cc9d6c4-99z4w\" (UID: \"46eee1f6-965e-4d6d-a520-77b2f472b164\") " pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.300815 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46eee1f6-965e-4d6d-a520-77b2f472b164-config\") pod \"controller-manager-678cc9d6c4-99z4w\" (UID: \"46eee1f6-965e-4d6d-a520-77b2f472b164\") " pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.300859 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gv6bn\" (UniqueName: \"kubernetes.io/projected/46eee1f6-965e-4d6d-a520-77b2f472b164-kube-api-access-gv6bn\") pod \"controller-manager-678cc9d6c4-99z4w\" (UID: \"46eee1f6-965e-4d6d-a520-77b2f472b164\") " pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.303134 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/46eee1f6-965e-4d6d-a520-77b2f472b164-client-ca\") pod \"controller-manager-678cc9d6c4-99z4w\" (UID: \"46eee1f6-965e-4d6d-a520-77b2f472b164\") " pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.304372 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46eee1f6-965e-4d6d-a520-77b2f472b164-config\") pod \"controller-manager-678cc9d6c4-99z4w\" (UID: \"46eee1f6-965e-4d6d-a520-77b2f472b164\") " pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.304929 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/46eee1f6-965e-4d6d-a520-77b2f472b164-proxy-ca-bundles\") pod \"controller-manager-678cc9d6c4-99z4w\" (UID: \"46eee1f6-965e-4d6d-a520-77b2f472b164\") " 
pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.309654 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46eee1f6-965e-4d6d-a520-77b2f472b164-serving-cert\") pod \"controller-manager-678cc9d6c4-99z4w\" (UID: \"46eee1f6-965e-4d6d-a520-77b2f472b164\") " pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.339222 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gv6bn\" (UniqueName: \"kubernetes.io/projected/46eee1f6-965e-4d6d-a520-77b2f472b164-kube-api-access-gv6bn\") pod \"controller-manager-678cc9d6c4-99z4w\" (UID: \"46eee1f6-965e-4d6d-a520-77b2f472b164\") " pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.358088 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e1bea3e-cdc1-4318-8329-e0b36fd1d306" path="/var/lib/kubelet/pods/1e1bea3e-cdc1-4318-8329-e0b36fd1d306/volumes" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.422893 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:03 crc kubenswrapper[4877]: I0128 16:41:03.674059 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-678cc9d6c4-99z4w"] Jan 28 16:41:04 crc kubenswrapper[4877]: I0128 16:41:04.289670 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" event={"ID":"46eee1f6-965e-4d6d-a520-77b2f472b164","Type":"ContainerStarted","Data":"6bcab5713b5621eb7db3cbd28a75712d6d366ef3c644f806b2269be661cd1430"} Jan 28 16:41:04 crc kubenswrapper[4877]: I0128 16:41:04.289729 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" event={"ID":"46eee1f6-965e-4d6d-a520-77b2f472b164","Type":"ContainerStarted","Data":"ac3e8569242cc61c4781e18827bfb01c17c7df5fbd56e4e0f24b8d018e91bcaa"} Jan 28 16:41:04 crc kubenswrapper[4877]: I0128 16:41:04.290147 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:04 crc kubenswrapper[4877]: I0128 16:41:04.295124 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" Jan 28 16:41:04 crc kubenswrapper[4877]: I0128 16:41:04.311666 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" podStartSLOduration=3.311642564 podStartE2EDuration="3.311642564s" podCreationTimestamp="2026-01-28 16:41:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:41:04.310675767 +0000 UTC m=+367.869002655" watchObservedRunningTime="2026-01-28 16:41:04.311642564 +0000 UTC m=+367.869969462" Jan 28 16:41:07 crc kubenswrapper[4877]: I0128 16:41:07.077015 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: 
connect: connection refused" start-of-body= Jan 28 16:41:07 crc kubenswrapper[4877]: I0128 16:41:07.077608 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:41:15 crc kubenswrapper[4877]: I0128 16:41:15.139273 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" Jan 28 16:41:15 crc kubenswrapper[4877]: I0128 16:41:15.219573 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kzc7h"] Jan 28 16:41:24 crc kubenswrapper[4877]: I0128 16:41:24.784279 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sszxr"] Jan 28 16:41:24 crc kubenswrapper[4877]: I0128 16:41:24.785283 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sszxr" podUID="8d139104-17f5-47de-a21d-08340d961df3" containerName="registry-server" containerID="cri-o://94c34b88600d9843d390c37a3ea3fcc00126bcf6131176e29b76b64f2f852a37" gracePeriod=30 Jan 28 16:41:24 crc kubenswrapper[4877]: I0128 16:41:24.799609 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-prvss"] Jan 28 16:41:24 crc kubenswrapper[4877]: I0128 16:41:24.800982 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-prvss" podUID="fa400ca9-c7cc-482b-af01-6743a80710fe" containerName="registry-server" containerID="cri-o://8291659040b753d9c1d2bf6a93edce02f963d622dafd441592a70f78316bea14" gracePeriod=30 Jan 28 16:41:24 crc kubenswrapper[4877]: I0128 16:41:24.815177 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-4vk27"] Jan 28 16:41:24 crc kubenswrapper[4877]: I0128 16:41:24.815493 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" podUID="6a92f67e-224e-40a8-893d-edbe8dad2036" containerName="marketplace-operator" containerID="cri-o://f76f59a26b35bdff1adb4870f156ce2a0fa1038f5ae7b97fd3b42464dd6f51d8" gracePeriod=30 Jan 28 16:41:24 crc kubenswrapper[4877]: I0128 16:41:24.818934 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2vhl"] Jan 28 16:41:24 crc kubenswrapper[4877]: I0128 16:41:24.819196 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-d2vhl" podUID="4a7c4fb9-52e4-4736-9165-b793c332af0d" containerName="registry-server" containerID="cri-o://278c64682b2e1bc0ee33815f5dfc2dfc0050ab186b9c1a381b751eb690f21fab" gracePeriod=30 Jan 28 16:41:24 crc kubenswrapper[4877]: I0128 16:41:24.831466 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vxfw7"] Jan 28 16:41:24 crc kubenswrapper[4877]: I0128 16:41:24.831753 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vxfw7" podUID="22e4ba64-0a17-4ea7-8b9c-aa09d864be39" containerName="registry-server" containerID="cri-o://d17573daeb69e8294ee5e32d3d660477680d91b510f48fa49c0a1772ed1c4fa8" 
gracePeriod=30 Jan 28 16:41:24 crc kubenswrapper[4877]: I0128 16:41:24.843381 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ztt5t"] Jan 28 16:41:24 crc kubenswrapper[4877]: I0128 16:41:24.844507 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" Jan 28 16:41:24 crc kubenswrapper[4877]: I0128 16:41:24.861550 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ztt5t"] Jan 28 16:41:24 crc kubenswrapper[4877]: I0128 16:41:24.951651 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4lbl\" (UniqueName: \"kubernetes.io/projected/e4d169b3-a547-428e-b407-ea1a018f7a36-kube-api-access-m4lbl\") pod \"marketplace-operator-79b997595-ztt5t\" (UID: \"e4d169b3-a547-428e-b407-ea1a018f7a36\") " pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" Jan 28 16:41:24 crc kubenswrapper[4877]: I0128 16:41:24.951705 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e4d169b3-a547-428e-b407-ea1a018f7a36-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ztt5t\" (UID: \"e4d169b3-a547-428e-b407-ea1a018f7a36\") " pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" Jan 28 16:41:24 crc kubenswrapper[4877]: I0128 16:41:24.951732 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e4d169b3-a547-428e-b407-ea1a018f7a36-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ztt5t\" (UID: \"e4d169b3-a547-428e-b407-ea1a018f7a36\") " pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.053406 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e4d169b3-a547-428e-b407-ea1a018f7a36-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ztt5t\" (UID: \"e4d169b3-a547-428e-b407-ea1a018f7a36\") " pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.053564 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4lbl\" (UniqueName: \"kubernetes.io/projected/e4d169b3-a547-428e-b407-ea1a018f7a36-kube-api-access-m4lbl\") pod \"marketplace-operator-79b997595-ztt5t\" (UID: \"e4d169b3-a547-428e-b407-ea1a018f7a36\") " pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.053590 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e4d169b3-a547-428e-b407-ea1a018f7a36-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ztt5t\" (UID: \"e4d169b3-a547-428e-b407-ea1a018f7a36\") " pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.055293 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e4d169b3-a547-428e-b407-ea1a018f7a36-marketplace-trusted-ca\") pod 
\"marketplace-operator-79b997595-ztt5t\" (UID: \"e4d169b3-a547-428e-b407-ea1a018f7a36\") " pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.061013 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e4d169b3-a547-428e-b407-ea1a018f7a36-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ztt5t\" (UID: \"e4d169b3-a547-428e-b407-ea1a018f7a36\") " pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.071819 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4lbl\" (UniqueName: \"kubernetes.io/projected/e4d169b3-a547-428e-b407-ea1a018f7a36-kube-api-access-m4lbl\") pod \"marketplace-operator-79b997595-ztt5t\" (UID: \"e4d169b3-a547-428e-b407-ea1a018f7a36\") " pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.161520 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.347435 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-prvss" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.460085 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa400ca9-c7cc-482b-af01-6743a80710fe-catalog-content\") pod \"fa400ca9-c7cc-482b-af01-6743a80710fe\" (UID: \"fa400ca9-c7cc-482b-af01-6743a80710fe\") " Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.460195 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa400ca9-c7cc-482b-af01-6743a80710fe-utilities\") pod \"fa400ca9-c7cc-482b-af01-6743a80710fe\" (UID: \"fa400ca9-c7cc-482b-af01-6743a80710fe\") " Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.460263 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvclq\" (UniqueName: \"kubernetes.io/projected/fa400ca9-c7cc-482b-af01-6743a80710fe-kube-api-access-kvclq\") pod \"fa400ca9-c7cc-482b-af01-6743a80710fe\" (UID: \"fa400ca9-c7cc-482b-af01-6743a80710fe\") " Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.462242 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa400ca9-c7cc-482b-af01-6743a80710fe-utilities" (OuterVolumeSpecName: "utilities") pod "fa400ca9-c7cc-482b-af01-6743a80710fe" (UID: "fa400ca9-c7cc-482b-af01-6743a80710fe"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.462736 4877 generic.go:334] "Generic (PLEG): container finished" podID="fa400ca9-c7cc-482b-af01-6743a80710fe" containerID="8291659040b753d9c1d2bf6a93edce02f963d622dafd441592a70f78316bea14" exitCode=0 Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.462803 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-prvss" event={"ID":"fa400ca9-c7cc-482b-af01-6743a80710fe","Type":"ContainerDied","Data":"8291659040b753d9c1d2bf6a93edce02f963d622dafd441592a70f78316bea14"} Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.462845 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-prvss" event={"ID":"fa400ca9-c7cc-482b-af01-6743a80710fe","Type":"ContainerDied","Data":"9fefed3eabfff26ed0cea8955fd0508c1bace142abaed454b0d598d384413bfa"} Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.462871 4877 scope.go:117] "RemoveContainer" containerID="8291659040b753d9c1d2bf6a93edce02f963d622dafd441592a70f78316bea14" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.463064 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-prvss" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.481988 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa400ca9-c7cc-482b-af01-6743a80710fe-kube-api-access-kvclq" (OuterVolumeSpecName: "kube-api-access-kvclq") pod "fa400ca9-c7cc-482b-af01-6743a80710fe" (UID: "fa400ca9-c7cc-482b-af01-6743a80710fe"). InnerVolumeSpecName "kube-api-access-kvclq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.483928 4877 generic.go:334] "Generic (PLEG): container finished" podID="8d139104-17f5-47de-a21d-08340d961df3" containerID="94c34b88600d9843d390c37a3ea3fcc00126bcf6131176e29b76b64f2f852a37" exitCode=0 Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.484144 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sszxr" event={"ID":"8d139104-17f5-47de-a21d-08340d961df3","Type":"ContainerDied","Data":"94c34b88600d9843d390c37a3ea3fcc00126bcf6131176e29b76b64f2f852a37"} Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.491720 4877 generic.go:334] "Generic (PLEG): container finished" podID="22e4ba64-0a17-4ea7-8b9c-aa09d864be39" containerID="d17573daeb69e8294ee5e32d3d660477680d91b510f48fa49c0a1772ed1c4fa8" exitCode=0 Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.491838 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vxfw7" event={"ID":"22e4ba64-0a17-4ea7-8b9c-aa09d864be39","Type":"ContainerDied","Data":"d17573daeb69e8294ee5e32d3d660477680d91b510f48fa49c0a1772ed1c4fa8"} Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.499664 4877 generic.go:334] "Generic (PLEG): container finished" podID="4a7c4fb9-52e4-4736-9165-b793c332af0d" containerID="278c64682b2e1bc0ee33815f5dfc2dfc0050ab186b9c1a381b751eb690f21fab" exitCode=0 Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.499770 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2vhl" event={"ID":"4a7c4fb9-52e4-4736-9165-b793c332af0d","Type":"ContainerDied","Data":"278c64682b2e1bc0ee33815f5dfc2dfc0050ab186b9c1a381b751eb690f21fab"} Jan 28 16:41:25 crc 
kubenswrapper[4877]: I0128 16:41:25.512996 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" event={"ID":"6a92f67e-224e-40a8-893d-edbe8dad2036","Type":"ContainerDied","Data":"f76f59a26b35bdff1adb4870f156ce2a0fa1038f5ae7b97fd3b42464dd6f51d8"} Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.512988 4877 generic.go:334] "Generic (PLEG): container finished" podID="6a92f67e-224e-40a8-893d-edbe8dad2036" containerID="f76f59a26b35bdff1adb4870f156ce2a0fa1038f5ae7b97fd3b42464dd6f51d8" exitCode=0 Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.518342 4877 scope.go:117] "RemoveContainer" containerID="2fd6821929b6767f9c79983614831353ae16da66cf26693824c827ec34970f27" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.539960 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vxfw7" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.541210 4877 scope.go:117] "RemoveContainer" containerID="da8d3eb10e1942ab7188f6d5f3b49658d08145201a253415184f19b6bfc3b33d" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.547387 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.568874 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d139104-17f5-47de-a21d-08340d961df3-catalog-content\") pod \"8d139104-17f5-47de-a21d-08340d961df3\" (UID: \"8d139104-17f5-47de-a21d-08340d961df3\") " Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.568928 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d139104-17f5-47de-a21d-08340d961df3-utilities\") pod \"8d139104-17f5-47de-a21d-08340d961df3\" (UID: \"8d139104-17f5-47de-a21d-08340d961df3\") " Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.568982 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cq772\" (UniqueName: \"kubernetes.io/projected/8d139104-17f5-47de-a21d-08340d961df3-kube-api-access-cq772\") pod \"8d139104-17f5-47de-a21d-08340d961df3\" (UID: \"8d139104-17f5-47de-a21d-08340d961df3\") " Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.569688 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqdqb\" (UniqueName: \"kubernetes.io/projected/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-kube-api-access-bqdqb\") pod \"22e4ba64-0a17-4ea7-8b9c-aa09d864be39\" (UID: \"22e4ba64-0a17-4ea7-8b9c-aa09d864be39\") " Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.569801 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-utilities\") pod \"22e4ba64-0a17-4ea7-8b9c-aa09d864be39\" (UID: \"22e4ba64-0a17-4ea7-8b9c-aa09d864be39\") " Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.569884 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-catalog-content\") pod \"22e4ba64-0a17-4ea7-8b9c-aa09d864be39\" (UID: \"22e4ba64-0a17-4ea7-8b9c-aa09d864be39\") " Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.570428 4877 reconciler_common.go:293] 
"Volume detached for volume \"kube-api-access-kvclq\" (UniqueName: \"kubernetes.io/projected/fa400ca9-c7cc-482b-af01-6743a80710fe-kube-api-access-kvclq\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.570445 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa400ca9-c7cc-482b-af01-6743a80710fe-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.574221 4877 scope.go:117] "RemoveContainer" containerID="8291659040b753d9c1d2bf6a93edce02f963d622dafd441592a70f78316bea14" Jan 28 16:41:25 crc kubenswrapper[4877]: E0128 16:41:25.575096 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8291659040b753d9c1d2bf6a93edce02f963d622dafd441592a70f78316bea14\": container with ID starting with 8291659040b753d9c1d2bf6a93edce02f963d622dafd441592a70f78316bea14 not found: ID does not exist" containerID="8291659040b753d9c1d2bf6a93edce02f963d622dafd441592a70f78316bea14" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.575143 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8291659040b753d9c1d2bf6a93edce02f963d622dafd441592a70f78316bea14"} err="failed to get container status \"8291659040b753d9c1d2bf6a93edce02f963d622dafd441592a70f78316bea14\": rpc error: code = NotFound desc = could not find container \"8291659040b753d9c1d2bf6a93edce02f963d622dafd441592a70f78316bea14\": container with ID starting with 8291659040b753d9c1d2bf6a93edce02f963d622dafd441592a70f78316bea14 not found: ID does not exist" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.577096 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d139104-17f5-47de-a21d-08340d961df3-utilities" (OuterVolumeSpecName: "utilities") pod "8d139104-17f5-47de-a21d-08340d961df3" (UID: "8d139104-17f5-47de-a21d-08340d961df3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.577647 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-utilities" (OuterVolumeSpecName: "utilities") pod "22e4ba64-0a17-4ea7-8b9c-aa09d864be39" (UID: "22e4ba64-0a17-4ea7-8b9c-aa09d864be39"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.577829 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d139104-17f5-47de-a21d-08340d961df3-kube-api-access-cq772" (OuterVolumeSpecName: "kube-api-access-cq772") pod "8d139104-17f5-47de-a21d-08340d961df3" (UID: "8d139104-17f5-47de-a21d-08340d961df3"). InnerVolumeSpecName "kube-api-access-cq772". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.575180 4877 scope.go:117] "RemoveContainer" containerID="2fd6821929b6767f9c79983614831353ae16da66cf26693824c827ec34970f27" Jan 28 16:41:25 crc kubenswrapper[4877]: E0128 16:41:25.578666 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2fd6821929b6767f9c79983614831353ae16da66cf26693824c827ec34970f27\": container with ID starting with 2fd6821929b6767f9c79983614831353ae16da66cf26693824c827ec34970f27 not found: ID does not exist" containerID="2fd6821929b6767f9c79983614831353ae16da66cf26693824c827ec34970f27" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.578701 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fd6821929b6767f9c79983614831353ae16da66cf26693824c827ec34970f27"} err="failed to get container status \"2fd6821929b6767f9c79983614831353ae16da66cf26693824c827ec34970f27\": rpc error: code = NotFound desc = could not find container \"2fd6821929b6767f9c79983614831353ae16da66cf26693824c827ec34970f27\": container with ID starting with 2fd6821929b6767f9c79983614831353ae16da66cf26693824c827ec34970f27 not found: ID does not exist" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.578725 4877 scope.go:117] "RemoveContainer" containerID="da8d3eb10e1942ab7188f6d5f3b49658d08145201a253415184f19b6bfc3b33d" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.579522 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d2vhl" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.582611 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-kube-api-access-bqdqb" (OuterVolumeSpecName: "kube-api-access-bqdqb") pod "22e4ba64-0a17-4ea7-8b9c-aa09d864be39" (UID: "22e4ba64-0a17-4ea7-8b9c-aa09d864be39"). InnerVolumeSpecName "kube-api-access-bqdqb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.594466 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa400ca9-c7cc-482b-af01-6743a80710fe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fa400ca9-c7cc-482b-af01-6743a80710fe" (UID: "fa400ca9-c7cc-482b-af01-6743a80710fe"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:41:25 crc kubenswrapper[4877]: E0128 16:41:25.595037 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da8d3eb10e1942ab7188f6d5f3b49658d08145201a253415184f19b6bfc3b33d\": container with ID starting with da8d3eb10e1942ab7188f6d5f3b49658d08145201a253415184f19b6bfc3b33d not found: ID does not exist" containerID="da8d3eb10e1942ab7188f6d5f3b49658d08145201a253415184f19b6bfc3b33d" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.595090 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da8d3eb10e1942ab7188f6d5f3b49658d08145201a253415184f19b6bfc3b33d"} err="failed to get container status \"da8d3eb10e1942ab7188f6d5f3b49658d08145201a253415184f19b6bfc3b33d\": rpc error: code = NotFound desc = could not find container \"da8d3eb10e1942ab7188f6d5f3b49658d08145201a253415184f19b6bfc3b33d\": container with ID starting with da8d3eb10e1942ab7188f6d5f3b49658d08145201a253415184f19b6bfc3b33d not found: ID does not exist" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.595154 4877 scope.go:117] "RemoveContainer" containerID="dbf9d3e7d5bbaf2e290fbb65cc7684e1af3f3a60eeffb7ae0b777c27af2ceadd" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.599073 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.651368 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d139104-17f5-47de-a21d-08340d961df3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8d139104-17f5-47de-a21d-08340d961df3" (UID: "8d139104-17f5-47de-a21d-08340d961df3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.672075 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a7c4fb9-52e4-4736-9165-b793c332af0d-utilities\") pod \"4a7c4fb9-52e4-4736-9165-b793c332af0d\" (UID: \"4a7c4fb9-52e4-4736-9165-b793c332af0d\") " Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.672316 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6a92f67e-224e-40a8-893d-edbe8dad2036-marketplace-operator-metrics\") pod \"6a92f67e-224e-40a8-893d-edbe8dad2036\" (UID: \"6a92f67e-224e-40a8-893d-edbe8dad2036\") " Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.672362 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fm9m\" (UniqueName: \"kubernetes.io/projected/4a7c4fb9-52e4-4736-9165-b793c332af0d-kube-api-access-7fm9m\") pod \"4a7c4fb9-52e4-4736-9165-b793c332af0d\" (UID: \"4a7c4fb9-52e4-4736-9165-b793c332af0d\") " Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.672407 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2k8j\" (UniqueName: \"kubernetes.io/projected/6a92f67e-224e-40a8-893d-edbe8dad2036-kube-api-access-v2k8j\") pod \"6a92f67e-224e-40a8-893d-edbe8dad2036\" (UID: \"6a92f67e-224e-40a8-893d-edbe8dad2036\") " Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.672497 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a7c4fb9-52e4-4736-9165-b793c332af0d-catalog-content\") pod \"4a7c4fb9-52e4-4736-9165-b793c332af0d\" (UID: \"4a7c4fb9-52e4-4736-9165-b793c332af0d\") " Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.672531 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6a92f67e-224e-40a8-893d-edbe8dad2036-marketplace-trusted-ca\") pod \"6a92f67e-224e-40a8-893d-edbe8dad2036\" (UID: \"6a92f67e-224e-40a8-893d-edbe8dad2036\") " Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.674036 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a7c4fb9-52e4-4736-9165-b793c332af0d-utilities" (OuterVolumeSpecName: "utilities") pod "4a7c4fb9-52e4-4736-9165-b793c332af0d" (UID: "4a7c4fb9-52e4-4736-9165-b793c332af0d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.674666 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d139104-17f5-47de-a21d-08340d961df3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.674691 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d139104-17f5-47de-a21d-08340d961df3-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.674703 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cq772\" (UniqueName: \"kubernetes.io/projected/8d139104-17f5-47de-a21d-08340d961df3-kube-api-access-cq772\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.674719 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqdqb\" (UniqueName: \"kubernetes.io/projected/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-kube-api-access-bqdqb\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.674729 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.674739 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a7c4fb9-52e4-4736-9165-b793c332af0d-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.674749 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa400ca9-c7cc-482b-af01-6743a80710fe-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.675109 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a7c4fb9-52e4-4736-9165-b793c332af0d-kube-api-access-7fm9m" (OuterVolumeSpecName: "kube-api-access-7fm9m") pod "4a7c4fb9-52e4-4736-9165-b793c332af0d" (UID: "4a7c4fb9-52e4-4736-9165-b793c332af0d"). InnerVolumeSpecName "kube-api-access-7fm9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.675436 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a92f67e-224e-40a8-893d-edbe8dad2036-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "6a92f67e-224e-40a8-893d-edbe8dad2036" (UID: "6a92f67e-224e-40a8-893d-edbe8dad2036"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.676164 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a92f67e-224e-40a8-893d-edbe8dad2036-kube-api-access-v2k8j" (OuterVolumeSpecName: "kube-api-access-v2k8j") pod "6a92f67e-224e-40a8-893d-edbe8dad2036" (UID: "6a92f67e-224e-40a8-893d-edbe8dad2036"). InnerVolumeSpecName "kube-api-access-v2k8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.676458 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a92f67e-224e-40a8-893d-edbe8dad2036-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "6a92f67e-224e-40a8-893d-edbe8dad2036" (UID: "6a92f67e-224e-40a8-893d-edbe8dad2036"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.696751 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a7c4fb9-52e4-4736-9165-b793c332af0d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4a7c4fb9-52e4-4736-9165-b793c332af0d" (UID: "4a7c4fb9-52e4-4736-9165-b793c332af0d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.717752 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "22e4ba64-0a17-4ea7-8b9c-aa09d864be39" (UID: "22e4ba64-0a17-4ea7-8b9c-aa09d864be39"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.775906 4877 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6a92f67e-224e-40a8-893d-edbe8dad2036-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.775944 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22e4ba64-0a17-4ea7-8b9c-aa09d864be39-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.775954 4877 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6a92f67e-224e-40a8-893d-edbe8dad2036-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.775964 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fm9m\" (UniqueName: \"kubernetes.io/projected/4a7c4fb9-52e4-4736-9165-b793c332af0d-kube-api-access-7fm9m\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.775973 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2k8j\" (UniqueName: \"kubernetes.io/projected/6a92f67e-224e-40a8-893d-edbe8dad2036-kube-api-access-v2k8j\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.775981 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a7c4fb9-52e4-4736-9165-b793c332af0d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.780451 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ztt5t"] Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.800629 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-prvss"] Jan 28 16:41:25 crc kubenswrapper[4877]: I0128 16:41:25.803698 4877 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-prvss"] Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.534691 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sszxr" Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.534689 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sszxr" event={"ID":"8d139104-17f5-47de-a21d-08340d961df3","Type":"ContainerDied","Data":"7c72810bfa3fe755db96251cd41729804708bd209c7e59a8dd3b5484e68f6e46"} Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.535253 4877 scope.go:117] "RemoveContainer" containerID="94c34b88600d9843d390c37a3ea3fcc00126bcf6131176e29b76b64f2f852a37" Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.540552 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" event={"ID":"e4d169b3-a547-428e-b407-ea1a018f7a36","Type":"ContainerStarted","Data":"1c14385d82017db18d6e19e94d12180ed820eabfb6dd834649a3f5173b8b5e19"} Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.540611 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" event={"ID":"e4d169b3-a547-428e-b407-ea1a018f7a36","Type":"ContainerStarted","Data":"f8e2ae535da4cb9673f5f9bdf43127d04a101c1f9c1f56b8b37c5e5cbebf15da"} Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.541610 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.546303 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.548219 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vxfw7" event={"ID":"22e4ba64-0a17-4ea7-8b9c-aa09d864be39","Type":"ContainerDied","Data":"fbc5ab71c9848ba1993c519ca523147becb07cd173e896d994666e33d207887c"} Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.548255 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vxfw7" Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.557759 4877 scope.go:117] "RemoveContainer" containerID="39c1b4974ec248afa76605554d29a03c790d1d868a2ec3c008b621b8391e57bb" Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.566317 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2vhl" event={"ID":"4a7c4fb9-52e4-4736-9165-b793c332af0d","Type":"ContainerDied","Data":"917adc26030adbd57533c9e83bfcf8a1c3e4804bfb662205e5f775ce20a002e2"} Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.566787 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d2vhl" Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.569256 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" podStartSLOduration=2.569222674 podStartE2EDuration="2.569222674s" podCreationTimestamp="2026-01-28 16:41:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:41:26.5661997 +0000 UTC m=+390.124526588" watchObservedRunningTime="2026-01-28 16:41:26.569222674 +0000 UTC m=+390.127549562" Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.577319 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" event={"ID":"6a92f67e-224e-40a8-893d-edbe8dad2036","Type":"ContainerDied","Data":"d94253d912f1bd972c05e0096e642e5723fbeda80e2d809a4b02287933c40e23"} Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.577421 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-4vk27" Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.594611 4877 scope.go:117] "RemoveContainer" containerID="e2a2591e6df0223533c5b5d0af67cf2a920b1cf1752bc415d01141985cd56390" Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.638253 4877 scope.go:117] "RemoveContainer" containerID="d17573daeb69e8294ee5e32d3d660477680d91b510f48fa49c0a1772ed1c4fa8" Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.641076 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sszxr"] Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.643826 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sszxr"] Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.660853 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vxfw7"] Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.664911 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vxfw7"] Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.666136 4877 scope.go:117] "RemoveContainer" containerID="3153a3e344d9547ece00c827f28c46ad0fc5c55b3aca0fa5f4bc3c080c7f4330" Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.690195 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-4vk27"] Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.693053 4877 scope.go:117] "RemoveContainer" containerID="077892604d74eb280d0080cadc1c8db37de5076c4ff07ccee60097f874caa07f" Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.696758 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-4vk27"] Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.700564 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2vhl"] Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.705721 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2vhl"] Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.706629 4877 scope.go:117] "RemoveContainer" containerID="278c64682b2e1bc0ee33815f5dfc2dfc0050ab186b9c1a381b751eb690f21fab" Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 
16:41:26.723174 4877 scope.go:117] "RemoveContainer" containerID="45fce4c53b20b248ab842b9f0b77528ef18fc78c2e58f41ebbb379b6625b3663" Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.736586 4877 scope.go:117] "RemoveContainer" containerID="6ed33e262f684582244fad98f3992f33fe9aa017f18232b41ebe43a2f64b7204" Jan 28 16:41:26 crc kubenswrapper[4877]: I0128 16:41:26.753829 4877 scope.go:117] "RemoveContainer" containerID="f76f59a26b35bdff1adb4870f156ce2a0fa1038f5ae7b97fd3b42464dd6f51d8" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.213181 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8q65r"] Jan 28 16:41:27 crc kubenswrapper[4877]: E0128 16:41:27.213595 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a92f67e-224e-40a8-893d-edbe8dad2036" containerName="marketplace-operator" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.213625 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a92f67e-224e-40a8-893d-edbe8dad2036" containerName="marketplace-operator" Jan 28 16:41:27 crc kubenswrapper[4877]: E0128 16:41:27.213647 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a7c4fb9-52e4-4736-9165-b793c332af0d" containerName="extract-utilities" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.213663 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a7c4fb9-52e4-4736-9165-b793c332af0d" containerName="extract-utilities" Jan 28 16:41:27 crc kubenswrapper[4877]: E0128 16:41:27.213683 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d139104-17f5-47de-a21d-08340d961df3" containerName="extract-content" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.213735 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d139104-17f5-47de-a21d-08340d961df3" containerName="extract-content" Jan 28 16:41:27 crc kubenswrapper[4877]: E0128 16:41:27.213763 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22e4ba64-0a17-4ea7-8b9c-aa09d864be39" containerName="registry-server" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.213778 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="22e4ba64-0a17-4ea7-8b9c-aa09d864be39" containerName="registry-server" Jan 28 16:41:27 crc kubenswrapper[4877]: E0128 16:41:27.213802 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d139104-17f5-47de-a21d-08340d961df3" containerName="registry-server" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.213816 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d139104-17f5-47de-a21d-08340d961df3" containerName="registry-server" Jan 28 16:41:27 crc kubenswrapper[4877]: E0128 16:41:27.213843 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a7c4fb9-52e4-4736-9165-b793c332af0d" containerName="extract-content" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.213857 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a7c4fb9-52e4-4736-9165-b793c332af0d" containerName="extract-content" Jan 28 16:41:27 crc kubenswrapper[4877]: E0128 16:41:27.213877 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d139104-17f5-47de-a21d-08340d961df3" containerName="extract-utilities" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.213891 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d139104-17f5-47de-a21d-08340d961df3" containerName="extract-utilities" Jan 28 16:41:27 crc kubenswrapper[4877]: E0128 16:41:27.213910 4877 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="fa400ca9-c7cc-482b-af01-6743a80710fe" containerName="extract-content" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.213924 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa400ca9-c7cc-482b-af01-6743a80710fe" containerName="extract-content" Jan 28 16:41:27 crc kubenswrapper[4877]: E0128 16:41:27.213947 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa400ca9-c7cc-482b-af01-6743a80710fe" containerName="registry-server" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.213964 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa400ca9-c7cc-482b-af01-6743a80710fe" containerName="registry-server" Jan 28 16:41:27 crc kubenswrapper[4877]: E0128 16:41:27.213988 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa400ca9-c7cc-482b-af01-6743a80710fe" containerName="extract-utilities" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.214003 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa400ca9-c7cc-482b-af01-6743a80710fe" containerName="extract-utilities" Jan 28 16:41:27 crc kubenswrapper[4877]: E0128 16:41:27.214029 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22e4ba64-0a17-4ea7-8b9c-aa09d864be39" containerName="extract-utilities" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.214044 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="22e4ba64-0a17-4ea7-8b9c-aa09d864be39" containerName="extract-utilities" Jan 28 16:41:27 crc kubenswrapper[4877]: E0128 16:41:27.214065 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22e4ba64-0a17-4ea7-8b9c-aa09d864be39" containerName="extract-content" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.214080 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="22e4ba64-0a17-4ea7-8b9c-aa09d864be39" containerName="extract-content" Jan 28 16:41:27 crc kubenswrapper[4877]: E0128 16:41:27.214105 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a7c4fb9-52e4-4736-9165-b793c332af0d" containerName="registry-server" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.214122 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a7c4fb9-52e4-4736-9165-b793c332af0d" containerName="registry-server" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.214315 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d139104-17f5-47de-a21d-08340d961df3" containerName="registry-server" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.214345 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a92f67e-224e-40a8-893d-edbe8dad2036" containerName="marketplace-operator" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.214366 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa400ca9-c7cc-482b-af01-6743a80710fe" containerName="registry-server" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.214391 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="22e4ba64-0a17-4ea7-8b9c-aa09d864be39" containerName="registry-server" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.214423 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a7c4fb9-52e4-4736-9165-b793c332af0d" containerName="registry-server" Jan 28 16:41:27 crc kubenswrapper[4877]: E0128 16:41:27.214672 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a92f67e-224e-40a8-893d-edbe8dad2036" containerName="marketplace-operator" Jan 28 16:41:27 crc 
kubenswrapper[4877]: I0128 16:41:27.214695 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a92f67e-224e-40a8-893d-edbe8dad2036" containerName="marketplace-operator" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.214932 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a92f67e-224e-40a8-893d-edbe8dad2036" containerName="marketplace-operator" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.218969 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8q65r" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.219527 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8q65r"] Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.221999 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.298697 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48qzs\" (UniqueName: \"kubernetes.io/projected/d0599e47-e131-43e4-a9f4-f362b888c964-kube-api-access-48qzs\") pod \"certified-operators-8q65r\" (UID: \"d0599e47-e131-43e4-a9f4-f362b888c964\") " pod="openshift-marketplace/certified-operators-8q65r" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.298785 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0599e47-e131-43e4-a9f4-f362b888c964-utilities\") pod \"certified-operators-8q65r\" (UID: \"d0599e47-e131-43e4-a9f4-f362b888c964\") " pod="openshift-marketplace/certified-operators-8q65r" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.299068 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0599e47-e131-43e4-a9f4-f362b888c964-catalog-content\") pod \"certified-operators-8q65r\" (UID: \"d0599e47-e131-43e4-a9f4-f362b888c964\") " pod="openshift-marketplace/certified-operators-8q65r" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.337590 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22e4ba64-0a17-4ea7-8b9c-aa09d864be39" path="/var/lib/kubelet/pods/22e4ba64-0a17-4ea7-8b9c-aa09d864be39/volumes" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.338201 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a7c4fb9-52e4-4736-9165-b793c332af0d" path="/var/lib/kubelet/pods/4a7c4fb9-52e4-4736-9165-b793c332af0d/volumes" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.339173 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a92f67e-224e-40a8-893d-edbe8dad2036" path="/var/lib/kubelet/pods/6a92f67e-224e-40a8-893d-edbe8dad2036/volumes" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.340659 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d139104-17f5-47de-a21d-08340d961df3" path="/var/lib/kubelet/pods/8d139104-17f5-47de-a21d-08340d961df3/volumes" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.341564 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa400ca9-c7cc-482b-af01-6743a80710fe" path="/var/lib/kubelet/pods/fa400ca9-c7cc-482b-af01-6743a80710fe/volumes" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.400031 4877 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0599e47-e131-43e4-a9f4-f362b888c964-catalog-content\") pod \"certified-operators-8q65r\" (UID: \"d0599e47-e131-43e4-a9f4-f362b888c964\") " pod="openshift-marketplace/certified-operators-8q65r" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.400344 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48qzs\" (UniqueName: \"kubernetes.io/projected/d0599e47-e131-43e4-a9f4-f362b888c964-kube-api-access-48qzs\") pod \"certified-operators-8q65r\" (UID: \"d0599e47-e131-43e4-a9f4-f362b888c964\") " pod="openshift-marketplace/certified-operators-8q65r" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.400542 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0599e47-e131-43e4-a9f4-f362b888c964-utilities\") pod \"certified-operators-8q65r\" (UID: \"d0599e47-e131-43e4-a9f4-f362b888c964\") " pod="openshift-marketplace/certified-operators-8q65r" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.400544 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0599e47-e131-43e4-a9f4-f362b888c964-catalog-content\") pod \"certified-operators-8q65r\" (UID: \"d0599e47-e131-43e4-a9f4-f362b888c964\") " pod="openshift-marketplace/certified-operators-8q65r" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.400734 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0599e47-e131-43e4-a9f4-f362b888c964-utilities\") pod \"certified-operators-8q65r\" (UID: \"d0599e47-e131-43e4-a9f4-f362b888c964\") " pod="openshift-marketplace/certified-operators-8q65r" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.423522 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48qzs\" (UniqueName: \"kubernetes.io/projected/d0599e47-e131-43e4-a9f4-f362b888c964-kube-api-access-48qzs\") pod \"certified-operators-8q65r\" (UID: \"d0599e47-e131-43e4-a9f4-f362b888c964\") " pod="openshift-marketplace/certified-operators-8q65r" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.540347 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8q65r" Jan 28 16:41:27 crc kubenswrapper[4877]: I0128 16:41:27.957659 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8q65r"] Jan 28 16:41:28 crc kubenswrapper[4877]: I0128 16:41:28.609661 4877 generic.go:334] "Generic (PLEG): container finished" podID="d0599e47-e131-43e4-a9f4-f362b888c964" containerID="12e88404dc814d35c36aa046dd6a7ed13e38f95968fd7f585baef26194f5be7b" exitCode=0 Jan 28 16:41:28 crc kubenswrapper[4877]: I0128 16:41:28.609780 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8q65r" event={"ID":"d0599e47-e131-43e4-a9f4-f362b888c964","Type":"ContainerDied","Data":"12e88404dc814d35c36aa046dd6a7ed13e38f95968fd7f585baef26194f5be7b"} Jan 28 16:41:28 crc kubenswrapper[4877]: I0128 16:41:28.610282 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8q65r" event={"ID":"d0599e47-e131-43e4-a9f4-f362b888c964","Type":"ContainerStarted","Data":"07c60bcb42b6e5a4276c59c62a6d77d612cbae1e242fe8a9a9dc64344d0a262b"} Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.004347 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-psvnx"] Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.005750 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-psvnx" Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.007934 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.020678 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-psvnx"] Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.124246 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ef78e1f-5284-49b7-90c3-58941fbfa168-utilities\") pod \"redhat-operators-psvnx\" (UID: \"8ef78e1f-5284-49b7-90c3-58941fbfa168\") " pod="openshift-marketplace/redhat-operators-psvnx" Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.124301 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btktt\" (UniqueName: \"kubernetes.io/projected/8ef78e1f-5284-49b7-90c3-58941fbfa168-kube-api-access-btktt\") pod \"redhat-operators-psvnx\" (UID: \"8ef78e1f-5284-49b7-90c3-58941fbfa168\") " pod="openshift-marketplace/redhat-operators-psvnx" Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.124426 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ef78e1f-5284-49b7-90c3-58941fbfa168-catalog-content\") pod \"redhat-operators-psvnx\" (UID: \"8ef78e1f-5284-49b7-90c3-58941fbfa168\") " pod="openshift-marketplace/redhat-operators-psvnx" Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.225975 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ef78e1f-5284-49b7-90c3-58941fbfa168-catalog-content\") pod \"redhat-operators-psvnx\" (UID: \"8ef78e1f-5284-49b7-90c3-58941fbfa168\") " pod="openshift-marketplace/redhat-operators-psvnx" Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.226098 
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.226153 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btktt\" (UniqueName: \"kubernetes.io/projected/8ef78e1f-5284-49b7-90c3-58941fbfa168-kube-api-access-btktt\") pod \"redhat-operators-psvnx\" (UID: \"8ef78e1f-5284-49b7-90c3-58941fbfa168\") " pod="openshift-marketplace/redhat-operators-psvnx"
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.226550 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ef78e1f-5284-49b7-90c3-58941fbfa168-catalog-content\") pod \"redhat-operators-psvnx\" (UID: \"8ef78e1f-5284-49b7-90c3-58941fbfa168\") " pod="openshift-marketplace/redhat-operators-psvnx"
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.226689 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ef78e1f-5284-49b7-90c3-58941fbfa168-utilities\") pod \"redhat-operators-psvnx\" (UID: \"8ef78e1f-5284-49b7-90c3-58941fbfa168\") " pod="openshift-marketplace/redhat-operators-psvnx"
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.252044 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btktt\" (UniqueName: \"kubernetes.io/projected/8ef78e1f-5284-49b7-90c3-58941fbfa168-kube-api-access-btktt\") pod \"redhat-operators-psvnx\" (UID: \"8ef78e1f-5284-49b7-90c3-58941fbfa168\") " pod="openshift-marketplace/redhat-operators-psvnx"
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.328444 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-psvnx"
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.601863 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-98q9v"]
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.603416 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-98q9v"
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.607069 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.617653 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-98q9v"]
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.621280 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8q65r" event={"ID":"d0599e47-e131-43e4-a9f4-f362b888c964","Type":"ContainerStarted","Data":"435fae32ea3f2c81c35b82da4f50364f94960020571d49f2c92f21f4f69be76e"}
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.636341 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5aab0675-0c41-459e-aa67-b47ad5190813-catalog-content\") pod \"community-operators-98q9v\" (UID: \"5aab0675-0c41-459e-aa67-b47ad5190813\") " pod="openshift-marketplace/community-operators-98q9v"
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.636395 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbb4z\" (UniqueName: \"kubernetes.io/projected/5aab0675-0c41-459e-aa67-b47ad5190813-kube-api-access-mbb4z\") pod \"community-operators-98q9v\" (UID: \"5aab0675-0c41-459e-aa67-b47ad5190813\") " pod="openshift-marketplace/community-operators-98q9v"
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.636495 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5aab0675-0c41-459e-aa67-b47ad5190813-utilities\") pod \"community-operators-98q9v\" (UID: \"5aab0675-0c41-459e-aa67-b47ad5190813\") " pod="openshift-marketplace/community-operators-98q9v"
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.737333 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5aab0675-0c41-459e-aa67-b47ad5190813-catalog-content\") pod \"community-operators-98q9v\" (UID: \"5aab0675-0c41-459e-aa67-b47ad5190813\") " pod="openshift-marketplace/community-operators-98q9v"
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.737392 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbb4z\" (UniqueName: \"kubernetes.io/projected/5aab0675-0c41-459e-aa67-b47ad5190813-kube-api-access-mbb4z\") pod \"community-operators-98q9v\" (UID: \"5aab0675-0c41-459e-aa67-b47ad5190813\") " pod="openshift-marketplace/community-operators-98q9v"
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.737487 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5aab0675-0c41-459e-aa67-b47ad5190813-utilities\") pod \"community-operators-98q9v\" (UID: \"5aab0675-0c41-459e-aa67-b47ad5190813\") " pod="openshift-marketplace/community-operators-98q9v"
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.738125 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5aab0675-0c41-459e-aa67-b47ad5190813-utilities\") pod \"community-operators-98q9v\" (UID: \"5aab0675-0c41-459e-aa67-b47ad5190813\") " pod="openshift-marketplace/community-operators-98q9v"
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.738338 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5aab0675-0c41-459e-aa67-b47ad5190813-catalog-content\") pod \"community-operators-98q9v\" (UID: \"5aab0675-0c41-459e-aa67-b47ad5190813\") " pod="openshift-marketplace/community-operators-98q9v"
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.763974 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbb4z\" (UniqueName: \"kubernetes.io/projected/5aab0675-0c41-459e-aa67-b47ad5190813-kube-api-access-mbb4z\") pod \"community-operators-98q9v\" (UID: \"5aab0675-0c41-459e-aa67-b47ad5190813\") " pod="openshift-marketplace/community-operators-98q9v"
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.776468 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-psvnx"]
Jan 28 16:41:29 crc kubenswrapper[4877]: I0128 16:41:29.965509 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-98q9v"
Jan 28 16:41:30 crc kubenswrapper[4877]: I0128 16:41:30.389924 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-98q9v"]
Jan 28 16:41:30 crc kubenswrapper[4877]: W0128 16:41:30.398926 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5aab0675_0c41_459e_aa67_b47ad5190813.slice/crio-51870b09de7db9984506ea9bc4938e5e753538cc5a55050bf107ad88cefb815c WatchSource:0}: Error finding container 51870b09de7db9984506ea9bc4938e5e753538cc5a55050bf107ad88cefb815c: Status 404 returned error can't find the container with id 51870b09de7db9984506ea9bc4938e5e753538cc5a55050bf107ad88cefb815c
Jan 28 16:41:30 crc kubenswrapper[4877]: I0128 16:41:30.627643 4877 generic.go:334] "Generic (PLEG): container finished" podID="8ef78e1f-5284-49b7-90c3-58941fbfa168" containerID="98a5c7b9888748847f419f6d1dd6db0c335147274ea5bab9dcd6f2cfe12d9124" exitCode=0
Jan 28 16:41:30 crc kubenswrapper[4877]: I0128 16:41:30.627725 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-psvnx" event={"ID":"8ef78e1f-5284-49b7-90c3-58941fbfa168","Type":"ContainerDied","Data":"98a5c7b9888748847f419f6d1dd6db0c335147274ea5bab9dcd6f2cfe12d9124"}
Jan 28 16:41:30 crc kubenswrapper[4877]: I0128 16:41:30.628096 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-psvnx" event={"ID":"8ef78e1f-5284-49b7-90c3-58941fbfa168","Type":"ContainerStarted","Data":"972dba4855162946ce6618566ce0dbfcbb131c6bf8d2e75abc0921d4e2e42f52"}
Jan 28 16:41:30 crc kubenswrapper[4877]: I0128 16:41:30.631844 4877 generic.go:334] "Generic (PLEG): container finished" podID="d0599e47-e131-43e4-a9f4-f362b888c964" containerID="435fae32ea3f2c81c35b82da4f50364f94960020571d49f2c92f21f4f69be76e" exitCode=0
Jan 28 16:41:30 crc kubenswrapper[4877]: I0128 16:41:30.631937 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8q65r" event={"ID":"d0599e47-e131-43e4-a9f4-f362b888c964","Type":"ContainerDied","Data":"435fae32ea3f2c81c35b82da4f50364f94960020571d49f2c92f21f4f69be76e"}
Jan 28 16:41:30 crc kubenswrapper[4877]: I0128 16:41:30.635841 4877 generic.go:334] "Generic (PLEG): container finished" podID="5aab0675-0c41-459e-aa67-b47ad5190813" containerID="12724822b5129bfb5f1431a25411709307e9a7bc30d2b327a351342cb8474c50" exitCode=0
Jan 28 16:41:30 crc kubenswrapper[4877]: I0128 16:41:30.635925 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-98q9v" event={"ID":"5aab0675-0c41-459e-aa67-b47ad5190813","Type":"ContainerDied","Data":"12724822b5129bfb5f1431a25411709307e9a7bc30d2b327a351342cb8474c50"}
Jan 28 16:41:30 crc kubenswrapper[4877]: I0128 16:41:30.635962 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-98q9v" event={"ID":"5aab0675-0c41-459e-aa67-b47ad5190813","Type":"ContainerStarted","Data":"51870b09de7db9984506ea9bc4938e5e753538cc5a55050bf107ad88cefb815c"}
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.400418 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fp9w8"]
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.404505 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fp9w8"
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.408107 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.424304 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fp9w8"]
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.561642 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a782acd7-6ba8-4909-94e9-5005fd637272-catalog-content\") pod \"redhat-marketplace-fp9w8\" (UID: \"a782acd7-6ba8-4909-94e9-5005fd637272\") " pod="openshift-marketplace/redhat-marketplace-fp9w8"
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.561690 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a782acd7-6ba8-4909-94e9-5005fd637272-utilities\") pod \"redhat-marketplace-fp9w8\" (UID: \"a782acd7-6ba8-4909-94e9-5005fd637272\") " pod="openshift-marketplace/redhat-marketplace-fp9w8"
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.561754 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rtdc\" (UniqueName: \"kubernetes.io/projected/a782acd7-6ba8-4909-94e9-5005fd637272-kube-api-access-6rtdc\") pod \"redhat-marketplace-fp9w8\" (UID: \"a782acd7-6ba8-4909-94e9-5005fd637272\") " pod="openshift-marketplace/redhat-marketplace-fp9w8"
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.642751 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-psvnx" event={"ID":"8ef78e1f-5284-49b7-90c3-58941fbfa168","Type":"ContainerStarted","Data":"0f0a4e8454e503e97cf5cbe6ebd6ed1fb3880f0f29fd05ce686bda3cbaf6ae14"}
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.645385 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8q65r" event={"ID":"d0599e47-e131-43e4-a9f4-f362b888c964","Type":"ContainerStarted","Data":"ec4f3f0a2572403bb215ad60861c1df5be7acf211f4bd96613805be3c3de411d"}
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.663536 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a782acd7-6ba8-4909-94e9-5005fd637272-catalog-content\") pod \"redhat-marketplace-fp9w8\" (UID: \"a782acd7-6ba8-4909-94e9-5005fd637272\") " pod="openshift-marketplace/redhat-marketplace-fp9w8"
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.663606 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a782acd7-6ba8-4909-94e9-5005fd637272-utilities\") pod \"redhat-marketplace-fp9w8\" (UID: \"a782acd7-6ba8-4909-94e9-5005fd637272\") " pod="openshift-marketplace/redhat-marketplace-fp9w8"
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.663652 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rtdc\" (UniqueName: \"kubernetes.io/projected/a782acd7-6ba8-4909-94e9-5005fd637272-kube-api-access-6rtdc\") pod \"redhat-marketplace-fp9w8\" (UID: \"a782acd7-6ba8-4909-94e9-5005fd637272\") " pod="openshift-marketplace/redhat-marketplace-fp9w8"
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.664144 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a782acd7-6ba8-4909-94e9-5005fd637272-utilities\") pod \"redhat-marketplace-fp9w8\" (UID: \"a782acd7-6ba8-4909-94e9-5005fd637272\") " pod="openshift-marketplace/redhat-marketplace-fp9w8"
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.664180 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a782acd7-6ba8-4909-94e9-5005fd637272-catalog-content\") pod \"redhat-marketplace-fp9w8\" (UID: \"a782acd7-6ba8-4909-94e9-5005fd637272\") " pod="openshift-marketplace/redhat-marketplace-fp9w8"
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.696818 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rtdc\" (UniqueName: \"kubernetes.io/projected/a782acd7-6ba8-4909-94e9-5005fd637272-kube-api-access-6rtdc\") pod \"redhat-marketplace-fp9w8\" (UID: \"a782acd7-6ba8-4909-94e9-5005fd637272\") " pod="openshift-marketplace/redhat-marketplace-fp9w8"
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.702089 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8q65r" podStartSLOduration=2.1522229 podStartE2EDuration="4.702069598s" podCreationTimestamp="2026-01-28 16:41:27 +0000 UTC" firstStartedPulling="2026-01-28 16:41:28.613885073 +0000 UTC m=+392.172211961" lastFinishedPulling="2026-01-28 16:41:31.163731731 +0000 UTC m=+394.722058659" observedRunningTime="2026-01-28 16:41:31.700581884 +0000 UTC m=+395.258908772" watchObservedRunningTime="2026-01-28 16:41:31.702069598 +0000 UTC m=+395.260396486"
Jan 28 16:41:31 crc kubenswrapper[4877]: I0128 16:41:31.740632 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fp9w8"
Jan 28 16:41:32 crc kubenswrapper[4877]: I0128 16:41:32.278375 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fp9w8"]
Jan 28 16:41:32 crc kubenswrapper[4877]: W0128 16:41:32.306015 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda782acd7_6ba8_4909_94e9_5005fd637272.slice/crio-bdeb3822ef0f9a69b03e95ed2e010c9dc0b4917461048557f59a76671c75b6f1 WatchSource:0}: Error finding container bdeb3822ef0f9a69b03e95ed2e010c9dc0b4917461048557f59a76671c75b6f1: Status 404 returned error can't find the container with id bdeb3822ef0f9a69b03e95ed2e010c9dc0b4917461048557f59a76671c75b6f1
Jan 28 16:41:32 crc kubenswrapper[4877]: I0128 16:41:32.657773 4877 generic.go:334] "Generic (PLEG): container finished" podID="5aab0675-0c41-459e-aa67-b47ad5190813" containerID="2c5a4488a7e8d926cedcb1c70d3975504f997cb0d93ea822431848debbf9e263" exitCode=0
Jan 28 16:41:32 crc kubenswrapper[4877]: I0128 16:41:32.657865 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-98q9v" event={"ID":"5aab0675-0c41-459e-aa67-b47ad5190813","Type":"ContainerDied","Data":"2c5a4488a7e8d926cedcb1c70d3975504f997cb0d93ea822431848debbf9e263"}
Jan 28 16:41:32 crc kubenswrapper[4877]: I0128 16:41:32.661895 4877 generic.go:334] "Generic (PLEG): container finished" podID="8ef78e1f-5284-49b7-90c3-58941fbfa168" containerID="0f0a4e8454e503e97cf5cbe6ebd6ed1fb3880f0f29fd05ce686bda3cbaf6ae14" exitCode=0
Jan 28 16:41:32 crc kubenswrapper[4877]: I0128 16:41:32.661935 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-psvnx" event={"ID":"8ef78e1f-5284-49b7-90c3-58941fbfa168","Type":"ContainerDied","Data":"0f0a4e8454e503e97cf5cbe6ebd6ed1fb3880f0f29fd05ce686bda3cbaf6ae14"}
Jan 28 16:41:32 crc kubenswrapper[4877]: I0128 16:41:32.667080 4877 generic.go:334] "Generic (PLEG): container finished" podID="a782acd7-6ba8-4909-94e9-5005fd637272" containerID="f40e27587c91b034fd650153d74f7941dd9cd72648b5e7fbfad53c838c0244e0" exitCode=0
Jan 28 16:41:32 crc kubenswrapper[4877]: I0128 16:41:32.667173 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fp9w8" event={"ID":"a782acd7-6ba8-4909-94e9-5005fd637272","Type":"ContainerDied","Data":"f40e27587c91b034fd650153d74f7941dd9cd72648b5e7fbfad53c838c0244e0"}
Jan 28 16:41:32 crc kubenswrapper[4877]: I0128 16:41:32.667220 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fp9w8" event={"ID":"a782acd7-6ba8-4909-94e9-5005fd637272","Type":"ContainerStarted","Data":"bdeb3822ef0f9a69b03e95ed2e010c9dc0b4917461048557f59a76671c75b6f1"}
Jan 28 16:41:33 crc kubenswrapper[4877]: I0128 16:41:33.674832 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-psvnx" event={"ID":"8ef78e1f-5284-49b7-90c3-58941fbfa168","Type":"ContainerStarted","Data":"60e56efceacf187d9440648afd4b11d5a13780174727350c65532840c5fd94ab"}
Jan 28 16:41:33 crc kubenswrapper[4877]: I0128 16:41:33.676373 4877 generic.go:334] "Generic (PLEG): container finished" podID="a782acd7-6ba8-4909-94e9-5005fd637272" containerID="2905bf9542dd765e1e9252bf5e46482af3a421f8743fbf3b1b6902ef45391574" exitCode=0
Jan 28 16:41:33 crc kubenswrapper[4877]: I0128 16:41:33.676450 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fp9w8" event={"ID":"a782acd7-6ba8-4909-94e9-5005fd637272","Type":"ContainerDied","Data":"2905bf9542dd765e1e9252bf5e46482af3a421f8743fbf3b1b6902ef45391574"}
Jan 28 16:41:33 crc kubenswrapper[4877]: I0128 16:41:33.679855 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-98q9v" event={"ID":"5aab0675-0c41-459e-aa67-b47ad5190813","Type":"ContainerStarted","Data":"b3b04a090d6f6938eef642f35eccddb8dcfabfdd2b1726b7b14b2fff6e4d4046"}
Jan 28 16:41:33 crc kubenswrapper[4877]: I0128 16:41:33.695505 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-psvnx" podStartSLOduration=3.2531342260000002 podStartE2EDuration="5.69546128s" podCreationTimestamp="2026-01-28 16:41:28 +0000 UTC" firstStartedPulling="2026-01-28 16:41:30.629729833 +0000 UTC m=+394.188056721" lastFinishedPulling="2026-01-28 16:41:33.072056887 +0000 UTC m=+396.630383775" observedRunningTime="2026-01-28 16:41:33.692517693 +0000 UTC m=+397.250844581" watchObservedRunningTime="2026-01-28 16:41:33.69546128 +0000 UTC m=+397.253788168"
Jan 28 16:41:33 crc kubenswrapper[4877]: I0128 16:41:33.729292 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-98q9v" podStartSLOduration=2.331223565 podStartE2EDuration="4.729269171s" podCreationTimestamp="2026-01-28 16:41:29 +0000 UTC" firstStartedPulling="2026-01-28 16:41:30.63786085 +0000 UTC m=+394.196187738" lastFinishedPulling="2026-01-28 16:41:33.035906456 +0000 UTC m=+396.594233344" observedRunningTime="2026-01-28 16:41:33.725279785 +0000 UTC m=+397.283606673" watchObservedRunningTime="2026-01-28 16:41:33.729269171 +0000 UTC m=+397.287596059"
Jan 28 16:41:34 crc kubenswrapper[4877]: I0128 16:41:34.708204 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fp9w8" event={"ID":"a782acd7-6ba8-4909-94e9-5005fd637272","Type":"ContainerStarted","Data":"bf5d152a906a70f5ebbda83fbe67f8ff140e1f8e2084a515764e326e97341a27"}
Jan 28 16:41:34 crc kubenswrapper[4877]: I0128 16:41:34.733045 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fp9w8" podStartSLOduration=2.314821041 podStartE2EDuration="3.733024245s" podCreationTimestamp="2026-01-28 16:41:31 +0000 UTC" firstStartedPulling="2026-01-28 16:41:32.668233158 +0000 UTC m=+396.226560046" lastFinishedPulling="2026-01-28 16:41:34.086436362 +0000 UTC m=+397.644763250" observedRunningTime="2026-01-28 16:41:34.73013184 +0000 UTC m=+398.288458728" watchObservedRunningTime="2026-01-28 16:41:34.733024245 +0000 UTC m=+398.291351133"
Jan 28 16:41:37 crc kubenswrapper[4877]: I0128 16:41:37.076550 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 16:41:37 crc kubenswrapper[4877]: I0128 16:41:37.076921 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 16:41:37 crc kubenswrapper[4877]: I0128 16:41:37.076971 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm"
4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:41:37 crc kubenswrapper[4877]: I0128 16:41:37.077717 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"163e625204e85fa60aefc636260cc789258eff00206d927c91d05b2e7e892ef9"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 16:41:37 crc kubenswrapper[4877]: I0128 16:41:37.077797 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" containerID="cri-o://163e625204e85fa60aefc636260cc789258eff00206d927c91d05b2e7e892ef9" gracePeriod=600 Jan 28 16:41:37 crc kubenswrapper[4877]: I0128 16:41:37.540815 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8q65r" Jan 28 16:41:37 crc kubenswrapper[4877]: I0128 16:41:37.542423 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8q65r" Jan 28 16:41:37 crc kubenswrapper[4877]: I0128 16:41:37.601902 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8q65r" Jan 28 16:41:37 crc kubenswrapper[4877]: I0128 16:41:37.729223 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="163e625204e85fa60aefc636260cc789258eff00206d927c91d05b2e7e892ef9" exitCode=0 Jan 28 16:41:37 crc kubenswrapper[4877]: I0128 16:41:37.729629 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"163e625204e85fa60aefc636260cc789258eff00206d927c91d05b2e7e892ef9"} Jan 28 16:41:37 crc kubenswrapper[4877]: I0128 16:41:37.729727 4877 scope.go:117] "RemoveContainer" containerID="16523862c893447ea40311b61d25aaa027b640dc1afacf72ebbd0d12d2b293cb" Jan 28 16:41:37 crc kubenswrapper[4877]: I0128 16:41:37.773864 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8q65r" Jan 28 16:41:38 crc kubenswrapper[4877]: I0128 16:41:38.737701 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"f87fd390161fe2cfa7f8b535b8c64bf410aed6149ff27de8dbf9c3d787641d32"} Jan 28 16:41:39 crc kubenswrapper[4877]: I0128 16:41:39.328952 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-psvnx" Jan 28 16:41:39 crc kubenswrapper[4877]: I0128 16:41:39.329366 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-psvnx" Jan 28 16:41:39 crc kubenswrapper[4877]: I0128 16:41:39.381811 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-psvnx" Jan 28 16:41:39 crc kubenswrapper[4877]: I0128 16:41:39.804087 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/redhat-operators-psvnx" Jan 28 16:41:39 crc kubenswrapper[4877]: I0128 16:41:39.967087 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-98q9v" Jan 28 16:41:39 crc kubenswrapper[4877]: I0128 16:41:39.969459 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-98q9v" Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.030203 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-98q9v" Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.269363 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" podUID="3d4b7ce0-783a-44b4-9604-8ef0d398fec7" containerName="registry" containerID="cri-o://4178f21f5feffd2bd980f457e54e98da3e46ca5d15ccab9fd5925923ff4bb039" gracePeriod=30 Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.719740 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.755817 4877 generic.go:334] "Generic (PLEG): container finished" podID="3d4b7ce0-783a-44b4-9604-8ef0d398fec7" containerID="4178f21f5feffd2bd980f457e54e98da3e46ca5d15ccab9fd5925923ff4bb039" exitCode=0 Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.755871 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.755909 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" event={"ID":"3d4b7ce0-783a-44b4-9604-8ef0d398fec7","Type":"ContainerDied","Data":"4178f21f5feffd2bd980f457e54e98da3e46ca5d15ccab9fd5925923ff4bb039"} Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.756004 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kzc7h" event={"ID":"3d4b7ce0-783a-44b4-9604-8ef0d398fec7","Type":"ContainerDied","Data":"f703bfafa2e1fd1c7931f27789a95b3bcb62f15e2a9299f79e3c60a1ae86b063"} Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.756028 4877 scope.go:117] "RemoveContainer" containerID="4178f21f5feffd2bd980f457e54e98da3e46ca5d15ccab9fd5925923ff4bb039" Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.774280 4877 scope.go:117] "RemoveContainer" containerID="4178f21f5feffd2bd980f457e54e98da3e46ca5d15ccab9fd5925923ff4bb039" Jan 28 16:41:40 crc kubenswrapper[4877]: E0128 16:41:40.774838 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4178f21f5feffd2bd980f457e54e98da3e46ca5d15ccab9fd5925923ff4bb039\": container with ID starting with 4178f21f5feffd2bd980f457e54e98da3e46ca5d15ccab9fd5925923ff4bb039 not found: ID does not exist" containerID="4178f21f5feffd2bd980f457e54e98da3e46ca5d15ccab9fd5925923ff4bb039" Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.774883 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4178f21f5feffd2bd980f457e54e98da3e46ca5d15ccab9fd5925923ff4bb039"} err="failed to get container status \"4178f21f5feffd2bd980f457e54e98da3e46ca5d15ccab9fd5925923ff4bb039\": rpc error: code = NotFound desc = could not find container 
\"4178f21f5feffd2bd980f457e54e98da3e46ca5d15ccab9fd5925923ff4bb039\": container with ID starting with 4178f21f5feffd2bd980f457e54e98da3e46ca5d15ccab9fd5925923ff4bb039 not found: ID does not exist" Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.808896 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-98q9v" Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.904144 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-registry-certificates\") pod \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.904241 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-bound-sa-token\") pod \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.904271 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-trusted-ca\") pod \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.904301 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-registry-tls\") pod \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.904329 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-ca-trust-extracted\") pod \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.904610 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.904691 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-installation-pull-secrets\") pod \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.904767 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2b6n4\" (UniqueName: \"kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-kube-api-access-2b6n4\") pod \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\" (UID: \"3d4b7ce0-783a-44b4-9604-8ef0d398fec7\") " Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.906893 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod 
"3d4b7ce0-783a-44b4-9604-8ef0d398fec7" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.908315 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "3d4b7ce0-783a-44b4-9604-8ef0d398fec7" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.914785 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "3d4b7ce0-783a-44b4-9604-8ef0d398fec7" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.915686 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-kube-api-access-2b6n4" (OuterVolumeSpecName: "kube-api-access-2b6n4") pod "3d4b7ce0-783a-44b4-9604-8ef0d398fec7" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7"). InnerVolumeSpecName "kube-api-access-2b6n4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.918525 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "3d4b7ce0-783a-44b4-9604-8ef0d398fec7" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.919586 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "3d4b7ce0-783a-44b4-9604-8ef0d398fec7" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.927465 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "3d4b7ce0-783a-44b4-9604-8ef0d398fec7" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:41:40 crc kubenswrapper[4877]: I0128 16:41:40.934490 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "3d4b7ce0-783a-44b4-9604-8ef0d398fec7" (UID: "3d4b7ce0-783a-44b4-9604-8ef0d398fec7"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 28 16:41:41 crc kubenswrapper[4877]: I0128 16:41:41.006169 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2b6n4\" (UniqueName: \"kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-kube-api-access-2b6n4\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:41 crc kubenswrapper[4877]: I0128 16:41:41.006233 4877 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:41 crc kubenswrapper[4877]: I0128 16:41:41.006254 4877 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:41 crc kubenswrapper[4877]: I0128 16:41:41.006274 4877 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:41 crc kubenswrapper[4877]: I0128 16:41:41.006291 4877 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:41 crc kubenswrapper[4877]: I0128 16:41:41.006307 4877 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:41 crc kubenswrapper[4877]: I0128 16:41:41.006324 4877 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3d4b7ce0-783a-44b4-9604-8ef0d398fec7-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 28 16:41:41 crc kubenswrapper[4877]: I0128 16:41:41.091594 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kzc7h"] Jan 28 16:41:41 crc kubenswrapper[4877]: I0128 16:41:41.097335 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kzc7h"] Jan 28 16:41:41 crc kubenswrapper[4877]: I0128 16:41:41.344114 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d4b7ce0-783a-44b4-9604-8ef0d398fec7" path="/var/lib/kubelet/pods/3d4b7ce0-783a-44b4-9604-8ef0d398fec7/volumes" Jan 28 16:41:41 crc kubenswrapper[4877]: I0128 16:41:41.741112 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fp9w8" Jan 28 16:41:41 crc kubenswrapper[4877]: I0128 16:41:41.741744 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fp9w8" Jan 28 16:41:41 crc kubenswrapper[4877]: I0128 16:41:41.794407 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fp9w8" Jan 28 16:41:41 crc kubenswrapper[4877]: I0128 16:41:41.840677 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fp9w8" Jan 28 16:41:57 crc kubenswrapper[4877]: I0128 16:41:57.840004 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf"] Jan 28 16:41:57 crc 
kubenswrapper[4877]: E0128 16:41:57.841155 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d4b7ce0-783a-44b4-9604-8ef0d398fec7" containerName="registry" Jan 28 16:41:57 crc kubenswrapper[4877]: I0128 16:41:57.841174 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d4b7ce0-783a-44b4-9604-8ef0d398fec7" containerName="registry" Jan 28 16:41:57 crc kubenswrapper[4877]: I0128 16:41:57.841317 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d4b7ce0-783a-44b4-9604-8ef0d398fec7" containerName="registry" Jan 28 16:41:57 crc kubenswrapper[4877]: I0128 16:41:57.841881 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf" Jan 28 16:41:57 crc kubenswrapper[4877]: I0128 16:41:57.846314 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"telemetry-config" Jan 28 16:41:57 crc kubenswrapper[4877]: I0128 16:41:57.847110 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"openshift-service-ca.crt" Jan 28 16:41:57 crc kubenswrapper[4877]: I0128 16:41:57.848523 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-dockercfg-wwt9l" Jan 28 16:41:57 crc kubenswrapper[4877]: I0128 16:41:57.848919 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-tls" Jan 28 16:41:57 crc kubenswrapper[4877]: I0128 16:41:57.849153 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-root-ca.crt" Jan 28 16:41:57 crc kubenswrapper[4877]: I0128 16:41:57.856238 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf"] Jan 28 16:41:57 crc kubenswrapper[4877]: I0128 16:41:57.951669 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/3afc6e90-f9bc-472d-9ff4-22ac7c96cf30-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-6kttf\" (UID: \"3afc6e90-f9bc-472d-9ff4-22ac7c96cf30\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf" Jan 28 16:41:57 crc kubenswrapper[4877]: I0128 16:41:57.951811 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/3afc6e90-f9bc-472d-9ff4-22ac7c96cf30-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-6kttf\" (UID: \"3afc6e90-f9bc-472d-9ff4-22ac7c96cf30\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf" Jan 28 16:41:57 crc kubenswrapper[4877]: I0128 16:41:57.951846 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgb57\" (UniqueName: \"kubernetes.io/projected/3afc6e90-f9bc-472d-9ff4-22ac7c96cf30-kube-api-access-hgb57\") pod \"cluster-monitoring-operator-6d5b84845-6kttf\" (UID: \"3afc6e90-f9bc-472d-9ff4-22ac7c96cf30\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf" Jan 28 16:41:58 crc kubenswrapper[4877]: I0128 16:41:58.052890 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/3afc6e90-f9bc-472d-9ff4-22ac7c96cf30-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-6kttf\" (UID: \"3afc6e90-f9bc-472d-9ff4-22ac7c96cf30\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf" Jan 28 16:41:58 crc kubenswrapper[4877]: I0128 16:41:58.052941 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgb57\" (UniqueName: \"kubernetes.io/projected/3afc6e90-f9bc-472d-9ff4-22ac7c96cf30-kube-api-access-hgb57\") pod \"cluster-monitoring-operator-6d5b84845-6kttf\" (UID: \"3afc6e90-f9bc-472d-9ff4-22ac7c96cf30\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf" Jan 28 16:41:58 crc kubenswrapper[4877]: I0128 16:41:58.052976 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/3afc6e90-f9bc-472d-9ff4-22ac7c96cf30-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-6kttf\" (UID: \"3afc6e90-f9bc-472d-9ff4-22ac7c96cf30\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf" Jan 28 16:41:58 crc kubenswrapper[4877]: I0128 16:41:58.054508 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/3afc6e90-f9bc-472d-9ff4-22ac7c96cf30-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-6kttf\" (UID: \"3afc6e90-f9bc-472d-9ff4-22ac7c96cf30\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf" Jan 28 16:41:58 crc kubenswrapper[4877]: I0128 16:41:58.059410 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/3afc6e90-f9bc-472d-9ff4-22ac7c96cf30-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-6kttf\" (UID: \"3afc6e90-f9bc-472d-9ff4-22ac7c96cf30\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf" Jan 28 16:41:58 crc kubenswrapper[4877]: I0128 16:41:58.069164 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgb57\" (UniqueName: \"kubernetes.io/projected/3afc6e90-f9bc-472d-9ff4-22ac7c96cf30-kube-api-access-hgb57\") pod \"cluster-monitoring-operator-6d5b84845-6kttf\" (UID: \"3afc6e90-f9bc-472d-9ff4-22ac7c96cf30\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf" Jan 28 16:41:58 crc kubenswrapper[4877]: I0128 16:41:58.166968 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf" Jan 28 16:41:58 crc kubenswrapper[4877]: I0128 16:41:58.621539 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf"] Jan 28 16:41:58 crc kubenswrapper[4877]: I0128 16:41:58.871272 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf" event={"ID":"3afc6e90-f9bc-472d-9ff4-22ac7c96cf30","Type":"ContainerStarted","Data":"b29f36a70d704862c17b30c29e06838490a5baf2f4be1cdf3915c00753037825"} Jan 28 16:42:00 crc kubenswrapper[4877]: I0128 16:42:00.834368 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q"] Jan 28 16:42:00 crc kubenswrapper[4877]: I0128 16:42:00.835745 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q" Jan 28 16:42:00 crc kubenswrapper[4877]: I0128 16:42:00.839293 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-dockercfg-fhvx7" Jan 28 16:42:00 crc kubenswrapper[4877]: I0128 16:42:00.840083 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-tls" Jan 28 16:42:00 crc kubenswrapper[4877]: I0128 16:42:00.852513 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q"] Jan 28 16:42:00 crc kubenswrapper[4877]: I0128 16:42:00.882727 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf" event={"ID":"3afc6e90-f9bc-472d-9ff4-22ac7c96cf30","Type":"ContainerStarted","Data":"54e4990bd1ff6295ccfd1a2ffda8cb20005a21ffb57a457378ad37f7ac3e14ae"} Jan 28 16:42:00 crc kubenswrapper[4877]: I0128 16:42:00.896547 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6kttf" podStartSLOduration=2.314297469 podStartE2EDuration="3.896519636s" podCreationTimestamp="2026-01-28 16:41:57 +0000 UTC" firstStartedPulling="2026-01-28 16:41:58.635854572 +0000 UTC m=+422.194181470" lastFinishedPulling="2026-01-28 16:42:00.218076729 +0000 UTC m=+423.776403637" observedRunningTime="2026-01-28 16:42:00.89457915 +0000 UTC m=+424.452906048" watchObservedRunningTime="2026-01-28 16:42:00.896519636 +0000 UTC m=+424.454846524" Jan 28 16:42:00 crc kubenswrapper[4877]: I0128 16:42:00.993564 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/44b0ea14-238d-4f58-b504-b6375aa5137b-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-b824q\" (UID: \"44b0ea14-238d-4f58-b504-b6375aa5137b\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q" Jan 28 16:42:01 crc kubenswrapper[4877]: I0128 16:42:01.094953 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/44b0ea14-238d-4f58-b504-b6375aa5137b-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-b824q\" (UID: \"44b0ea14-238d-4f58-b504-b6375aa5137b\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q" Jan 28 16:42:01 crc kubenswrapper[4877]: I0128 16:42:01.102614 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/44b0ea14-238d-4f58-b504-b6375aa5137b-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-b824q\" (UID: \"44b0ea14-238d-4f58-b504-b6375aa5137b\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q" Jan 28 16:42:01 crc kubenswrapper[4877]: I0128 16:42:01.154394 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q" Jan 28 16:42:01 crc kubenswrapper[4877]: I0128 16:42:01.585679 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q"] Jan 28 16:42:01 crc kubenswrapper[4877]: W0128 16:42:01.598769 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod44b0ea14_238d_4f58_b504_b6375aa5137b.slice/crio-82e7c94a69705fc50894289c9c0dfe52dea44378ba8215638464f5482b0cbc48 WatchSource:0}: Error finding container 82e7c94a69705fc50894289c9c0dfe52dea44378ba8215638464f5482b0cbc48: Status 404 returned error can't find the container with id 82e7c94a69705fc50894289c9c0dfe52dea44378ba8215638464f5482b0cbc48 Jan 28 16:42:01 crc kubenswrapper[4877]: I0128 16:42:01.890788 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q" event={"ID":"44b0ea14-238d-4f58-b504-b6375aa5137b","Type":"ContainerStarted","Data":"82e7c94a69705fc50894289c9c0dfe52dea44378ba8215638464f5482b0cbc48"} Jan 28 16:42:03 crc kubenswrapper[4877]: I0128 16:42:03.925560 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q" event={"ID":"44b0ea14-238d-4f58-b504-b6375aa5137b","Type":"ContainerStarted","Data":"eefe7531d2a7ee3d670fff2e84adcdb30960b617c3f3cd67a24cfd7013fff151"} Jan 28 16:42:03 crc kubenswrapper[4877]: I0128 16:42:03.926104 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q" Jan 28 16:42:03 crc kubenswrapper[4877]: I0128 16:42:03.936131 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q" Jan 28 16:42:03 crc kubenswrapper[4877]: I0128 16:42:03.955700 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q" podStartSLOduration=2.549138409 podStartE2EDuration="3.955665021s" podCreationTimestamp="2026-01-28 16:42:00 +0000 UTC" firstStartedPulling="2026-01-28 16:42:01.603063989 +0000 UTC m=+425.161390887" lastFinishedPulling="2026-01-28 16:42:03.009590611 +0000 UTC m=+426.567917499" observedRunningTime="2026-01-28 16:42:03.945534314 +0000 UTC m=+427.503861192" watchObservedRunningTime="2026-01-28 16:42:03.955665021 +0000 UTC m=+427.513991929" Jan 28 16:42:04 crc kubenswrapper[4877]: I0128 16:42:04.927100 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-zgnvk"] Jan 28 16:42:04 crc kubenswrapper[4877]: I0128 16:42:04.928801 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" Jan 28 16:42:04 crc kubenswrapper[4877]: I0128 16:42:04.933950 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-client-ca" Jan 28 16:42:04 crc kubenswrapper[4877]: I0128 16:42:04.934209 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-kube-rbac-proxy-config" Jan 28 16:42:04 crc kubenswrapper[4877]: I0128 16:42:04.934372 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-tls" Jan 28 16:42:04 crc kubenswrapper[4877]: I0128 16:42:04.934627 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-dockercfg-5ffl9" Jan 28 16:42:04 crc kubenswrapper[4877]: I0128 16:42:04.942421 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-zgnvk"] Jan 28 16:42:05 crc kubenswrapper[4877]: I0128 16:42:05.066347 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/44b58d0d-d2fb-4319-b689-e4c4b02e52fd-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-zgnvk\" (UID: \"44b58d0d-d2fb-4319-b689-e4c4b02e52fd\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" Jan 28 16:42:05 crc kubenswrapper[4877]: I0128 16:42:05.066419 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/44b58d0d-d2fb-4319-b689-e4c4b02e52fd-metrics-client-ca\") pod \"prometheus-operator-db54df47d-zgnvk\" (UID: \"44b58d0d-d2fb-4319-b689-e4c4b02e52fd\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" Jan 28 16:42:05 crc kubenswrapper[4877]: I0128 16:42:05.066521 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggvcp\" (UniqueName: \"kubernetes.io/projected/44b58d0d-d2fb-4319-b689-e4c4b02e52fd-kube-api-access-ggvcp\") pod \"prometheus-operator-db54df47d-zgnvk\" (UID: \"44b58d0d-d2fb-4319-b689-e4c4b02e52fd\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" Jan 28 16:42:05 crc kubenswrapper[4877]: I0128 16:42:05.066562 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/44b58d0d-d2fb-4319-b689-e4c4b02e52fd-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-zgnvk\" (UID: \"44b58d0d-d2fb-4319-b689-e4c4b02e52fd\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" Jan 28 16:42:05 crc kubenswrapper[4877]: I0128 16:42:05.168208 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/44b58d0d-d2fb-4319-b689-e4c4b02e52fd-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-zgnvk\" (UID: \"44b58d0d-d2fb-4319-b689-e4c4b02e52fd\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" Jan 28 16:42:05 crc kubenswrapper[4877]: I0128 16:42:05.168341 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/44b58d0d-d2fb-4319-b689-e4c4b02e52fd-metrics-client-ca\") pod 
\"prometheus-operator-db54df47d-zgnvk\" (UID: \"44b58d0d-d2fb-4319-b689-e4c4b02e52fd\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" Jan 28 16:42:05 crc kubenswrapper[4877]: I0128 16:42:05.168405 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggvcp\" (UniqueName: \"kubernetes.io/projected/44b58d0d-d2fb-4319-b689-e4c4b02e52fd-kube-api-access-ggvcp\") pod \"prometheus-operator-db54df47d-zgnvk\" (UID: \"44b58d0d-d2fb-4319-b689-e4c4b02e52fd\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" Jan 28 16:42:05 crc kubenswrapper[4877]: I0128 16:42:05.168450 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/44b58d0d-d2fb-4319-b689-e4c4b02e52fd-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-zgnvk\" (UID: \"44b58d0d-d2fb-4319-b689-e4c4b02e52fd\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" Jan 28 16:42:05 crc kubenswrapper[4877]: I0128 16:42:05.171232 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/44b58d0d-d2fb-4319-b689-e4c4b02e52fd-metrics-client-ca\") pod \"prometheus-operator-db54df47d-zgnvk\" (UID: \"44b58d0d-d2fb-4319-b689-e4c4b02e52fd\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" Jan 28 16:42:05 crc kubenswrapper[4877]: I0128 16:42:05.176007 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/44b58d0d-d2fb-4319-b689-e4c4b02e52fd-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-zgnvk\" (UID: \"44b58d0d-d2fb-4319-b689-e4c4b02e52fd\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" Jan 28 16:42:05 crc kubenswrapper[4877]: I0128 16:42:05.176758 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/44b58d0d-d2fb-4319-b689-e4c4b02e52fd-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-zgnvk\" (UID: \"44b58d0d-d2fb-4319-b689-e4c4b02e52fd\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" Jan 28 16:42:05 crc kubenswrapper[4877]: I0128 16:42:05.192331 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggvcp\" (UniqueName: \"kubernetes.io/projected/44b58d0d-d2fb-4319-b689-e4c4b02e52fd-kube-api-access-ggvcp\") pod \"prometheus-operator-db54df47d-zgnvk\" (UID: \"44b58d0d-d2fb-4319-b689-e4c4b02e52fd\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" Jan 28 16:42:05 crc kubenswrapper[4877]: I0128 16:42:05.269774 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" Jan 28 16:42:05 crc kubenswrapper[4877]: I0128 16:42:05.688431 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-zgnvk"] Jan 28 16:42:05 crc kubenswrapper[4877]: W0128 16:42:05.692940 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod44b58d0d_d2fb_4319_b689_e4c4b02e52fd.slice/crio-c370250505a7356da681fed3f8d05faed6b4551641a4cc3c6bc4b301360ccf30 WatchSource:0}: Error finding container c370250505a7356da681fed3f8d05faed6b4551641a4cc3c6bc4b301360ccf30: Status 404 returned error can't find the container with id c370250505a7356da681fed3f8d05faed6b4551641a4cc3c6bc4b301360ccf30 Jan 28 16:42:05 crc kubenswrapper[4877]: I0128 16:42:05.939962 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" event={"ID":"44b58d0d-d2fb-4319-b689-e4c4b02e52fd","Type":"ContainerStarted","Data":"c370250505a7356da681fed3f8d05faed6b4551641a4cc3c6bc4b301360ccf30"} Jan 28 16:42:07 crc kubenswrapper[4877]: I0128 16:42:07.954647 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" event={"ID":"44b58d0d-d2fb-4319-b689-e4c4b02e52fd","Type":"ContainerStarted","Data":"2b9b55a07478a8487b74ea281624e8d51ac8235db8570268f7c5091565836c67"} Jan 28 16:42:07 crc kubenswrapper[4877]: I0128 16:42:07.955620 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" event={"ID":"44b58d0d-d2fb-4319-b689-e4c4b02e52fd","Type":"ContainerStarted","Data":"0fa92a7b1d7dc6f46d8232621e45c9b9e321df0077630a495c4dbdc142c27b06"} Jan 28 16:42:07 crc kubenswrapper[4877]: I0128 16:42:07.986342 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-db54df47d-zgnvk" podStartSLOduration=2.650320101 podStartE2EDuration="3.986306312s" podCreationTimestamp="2026-01-28 16:42:04 +0000 UTC" firstStartedPulling="2026-01-28 16:42:05.696828372 +0000 UTC m=+429.255155280" lastFinishedPulling="2026-01-28 16:42:07.032814603 +0000 UTC m=+430.591141491" observedRunningTime="2026-01-28 16:42:07.974332891 +0000 UTC m=+431.532659779" watchObservedRunningTime="2026-01-28 16:42:07.986306312 +0000 UTC m=+431.544633220" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.284908 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm"] Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.286613 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.288658 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-kube-rbac-proxy-config" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.293003 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-tls" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.293020 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-dockercfg-sqfq4" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.301517 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm"] Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.304977 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5"] Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.306065 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.308513 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-tls" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.311153 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-dockercfg-48kcj" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.311163 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-kube-rbac-proxy-config" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.311170 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-state-metrics-custom-resource-state-configmap" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.326709 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5"] Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.432661 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/node-exporter-t9zb7"] Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.433831 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.436590 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-dockercfg-47prf" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.437079 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-kube-rbac-proxy-config" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.437596 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-tls" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.446769 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/d9e21b18-b3f6-41c5-927a-60793ad970e2-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-b4qhm\" (UID: \"d9e21b18-b3f6-41c5-927a-60793ad970e2\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.446819 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/85db93af-62b2-41ea-9d7e-669118689f87-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.446875 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/85db93af-62b2-41ea-9d7e-669118689f87-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.446896 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/85db93af-62b2-41ea-9d7e-669118689f87-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.447032 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/85db93af-62b2-41ea-9d7e-669118689f87-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.447155 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/d9e21b18-b3f6-41c5-927a-60793ad970e2-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-b4qhm\" (UID: \"d9e21b18-b3f6-41c5-927a-60793ad970e2\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.447238 4877 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sz42m\" (UniqueName: \"kubernetes.io/projected/d9e21b18-b3f6-41c5-927a-60793ad970e2-kube-api-access-sz42m\") pod \"openshift-state-metrics-566fddb674-b4qhm\" (UID: \"d9e21b18-b3f6-41c5-927a-60793ad970e2\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.447268 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/85db93af-62b2-41ea-9d7e-669118689f87-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.447293 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhdsd\" (UniqueName: \"kubernetes.io/projected/85db93af-62b2-41ea-9d7e-669118689f87-kube-api-access-lhdsd\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.447318 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/d9e21b18-b3f6-41c5-927a-60793ad970e2-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-b4qhm\" (UID: \"d9e21b18-b3f6-41c5-927a-60793ad970e2\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.549382 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sz42m\" (UniqueName: \"kubernetes.io/projected/d9e21b18-b3f6-41c5-927a-60793ad970e2-kube-api-access-sz42m\") pod \"openshift-state-metrics-566fddb674-b4qhm\" (UID: \"d9e21b18-b3f6-41c5-927a-60793ad970e2\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.549456 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/27428910-4c34-4ab0-8a04-37ccb49d6439-node-exporter-textfile\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.549511 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/85db93af-62b2-41ea-9d7e-669118689f87-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.549626 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhdsd\" (UniqueName: \"kubernetes.io/projected/85db93af-62b2-41ea-9d7e-669118689f87-kube-api-access-lhdsd\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 
16:42:10.549686 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/d9e21b18-b3f6-41c5-927a-60793ad970e2-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-b4qhm\" (UID: \"d9e21b18-b3f6-41c5-927a-60793ad970e2\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.549761 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/27428910-4c34-4ab0-8a04-37ccb49d6439-node-exporter-tls\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.549814 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/d9e21b18-b3f6-41c5-927a-60793ad970e2-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-b4qhm\" (UID: \"d9e21b18-b3f6-41c5-927a-60793ad970e2\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.549859 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/85db93af-62b2-41ea-9d7e-669118689f87-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.549889 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjl69\" (UniqueName: \"kubernetes.io/projected/27428910-4c34-4ab0-8a04-37ccb49d6439-kube-api-access-rjl69\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.550131 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/27428910-4c34-4ab0-8a04-37ccb49d6439-node-exporter-wtmp\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.550186 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/85db93af-62b2-41ea-9d7e-669118689f87-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.550208 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/85db93af-62b2-41ea-9d7e-669118689f87-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.550235 4877 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/85db93af-62b2-41ea-9d7e-669118689f87-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.550258 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/27428910-4c34-4ab0-8a04-37ccb49d6439-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.550291 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/27428910-4c34-4ab0-8a04-37ccb49d6439-metrics-client-ca\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.550320 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/27428910-4c34-4ab0-8a04-37ccb49d6439-sys\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.550351 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/d9e21b18-b3f6-41c5-927a-60793ad970e2-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-b4qhm\" (UID: \"d9e21b18-b3f6-41c5-927a-60793ad970e2\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.550369 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/27428910-4c34-4ab0-8a04-37ccb49d6439-root\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.551690 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/85db93af-62b2-41ea-9d7e-669118689f87-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.552016 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/85db93af-62b2-41ea-9d7e-669118689f87-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.552071 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/d9e21b18-b3f6-41c5-927a-60793ad970e2-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-b4qhm\" (UID: \"d9e21b18-b3f6-41c5-927a-60793ad970e2\") " 
pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.552860 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/85db93af-62b2-41ea-9d7e-669118689f87-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.556219 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/85db93af-62b2-41ea-9d7e-669118689f87-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.556233 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/85db93af-62b2-41ea-9d7e-669118689f87-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.567177 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhdsd\" (UniqueName: \"kubernetes.io/projected/85db93af-62b2-41ea-9d7e-669118689f87-kube-api-access-lhdsd\") pod \"kube-state-metrics-777cb5bd5d-k82m5\" (UID: \"85db93af-62b2-41ea-9d7e-669118689f87\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.570149 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/d9e21b18-b3f6-41c5-927a-60793ad970e2-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-b4qhm\" (UID: \"d9e21b18-b3f6-41c5-927a-60793ad970e2\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.570533 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/d9e21b18-b3f6-41c5-927a-60793ad970e2-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-b4qhm\" (UID: \"d9e21b18-b3f6-41c5-927a-60793ad970e2\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.574225 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sz42m\" (UniqueName: \"kubernetes.io/projected/d9e21b18-b3f6-41c5-927a-60793ad970e2-kube-api-access-sz42m\") pod \"openshift-state-metrics-566fddb674-b4qhm\" (UID: \"d9e21b18-b3f6-41c5-927a-60793ad970e2\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.605529 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.622129 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.652434 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/27428910-4c34-4ab0-8a04-37ccb49d6439-sys\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.652546 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/27428910-4c34-4ab0-8a04-37ccb49d6439-root\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.652592 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/27428910-4c34-4ab0-8a04-37ccb49d6439-node-exporter-textfile\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.652630 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/27428910-4c34-4ab0-8a04-37ccb49d6439-node-exporter-tls\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.652665 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjl69\" (UniqueName: \"kubernetes.io/projected/27428910-4c34-4ab0-8a04-37ccb49d6439-kube-api-access-rjl69\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.652697 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/27428910-4c34-4ab0-8a04-37ccb49d6439-node-exporter-wtmp\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.652720 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/27428910-4c34-4ab0-8a04-37ccb49d6439-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.652743 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/27428910-4c34-4ab0-8a04-37ccb49d6439-metrics-client-ca\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.653403 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/27428910-4c34-4ab0-8a04-37ccb49d6439-metrics-client-ca\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " 
pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.653461 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/27428910-4c34-4ab0-8a04-37ccb49d6439-sys\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.654724 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"root\" (UniqueName: \"kubernetes.io/host-path/27428910-4c34-4ab0-8a04-37ccb49d6439-root\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.654806 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/27428910-4c34-4ab0-8a04-37ccb49d6439-node-exporter-wtmp\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.655225 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/27428910-4c34-4ab0-8a04-37ccb49d6439-node-exporter-textfile\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.660771 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/27428910-4c34-4ab0-8a04-37ccb49d6439-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.660905 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/27428910-4c34-4ab0-8a04-37ccb49d6439-node-exporter-tls\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.671319 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjl69\" (UniqueName: \"kubernetes.io/projected/27428910-4c34-4ab0-8a04-37ccb49d6439-kube-api-access-rjl69\") pod \"node-exporter-t9zb7\" (UID: \"27428910-4c34-4ab0-8a04-37ccb49d6439\") " pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.750373 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/node-exporter-t9zb7" Jan 28 16:42:10 crc kubenswrapper[4877]: I0128 16:42:10.981590 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-t9zb7" event={"ID":"27428910-4c34-4ab0-8a04-37ccb49d6439","Type":"ContainerStarted","Data":"0659f7b6abbac5fcd03b27cc4e5572ccdaf20dd02420849e37c10f46a31778ca"} Jan 28 16:42:11 crc kubenswrapper[4877]: W0128 16:42:11.121333 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod85db93af_62b2_41ea_9d7e_669118689f87.slice/crio-bc8ca12face027b421f871a7596ad4ee4071d118fc0198be718a2301a639fdac WatchSource:0}: Error finding container bc8ca12face027b421f871a7596ad4ee4071d118fc0198be718a2301a639fdac: Status 404 returned error can't find the container with id bc8ca12face027b421f871a7596ad4ee4071d118fc0198be718a2301a639fdac Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.121320 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5"] Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.138072 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm"] Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.468355 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.470705 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.475995 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-web-config" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.476341 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.476406 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.476013 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls-assets-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.476352 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-generated" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.476742 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-web" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.477121 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-dockercfg-j4jqx" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.478649 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-metric" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.492559 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"alertmanager-trusted-ca-bundle" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.518646 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.667612 4877 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.667675 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-config-volume\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.667700 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/48dd70f6-d859-4318-a5b8-57945d0c3dd1-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.667730 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.667759 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/48dd70f6-d859-4318-a5b8-57945d0c3dd1-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.667803 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zv48\" (UniqueName: \"kubernetes.io/projected/48dd70f6-d859-4318-a5b8-57945d0c3dd1-kube-api-access-8zv48\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.667849 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-web-config\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.667871 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/48dd70f6-d859-4318-a5b8-57945d0c3dd1-config-out\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.667887 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: 
\"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.667917 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/48dd70f6-d859-4318-a5b8-57945d0c3dd1-tls-assets\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.667941 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/48dd70f6-d859-4318-a5b8-57945d0c3dd1-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.667962 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.768975 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-config-volume\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.769027 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/48dd70f6-d859-4318-a5b8-57945d0c3dd1-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.769053 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.769092 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/48dd70f6-d859-4318-a5b8-57945d0c3dd1-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.769131 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zv48\" (UniqueName: \"kubernetes.io/projected/48dd70f6-d859-4318-a5b8-57945d0c3dd1-kube-api-access-8zv48\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.769167 4877 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-web-config\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.769187 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/48dd70f6-d859-4318-a5b8-57945d0c3dd1-config-out\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.769203 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.769226 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/48dd70f6-d859-4318-a5b8-57945d0c3dd1-tls-assets\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.769243 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/48dd70f6-d859-4318-a5b8-57945d0c3dd1-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.769263 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.769296 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.776029 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/48dd70f6-d859-4318-a5b8-57945d0c3dd1-config-out\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.777546 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.777635 4877 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/48dd70f6-d859-4318-a5b8-57945d0c3dd1-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.778019 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-config-volume\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.778035 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/48dd70f6-d859-4318-a5b8-57945d0c3dd1-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.779329 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/48dd70f6-d859-4318-a5b8-57945d0c3dd1-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.781263 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-web-config\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.782020 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/48dd70f6-d859-4318-a5b8-57945d0c3dd1-tls-assets\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.782392 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.785450 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.801160 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/48dd70f6-d859-4318-a5b8-57945d0c3dd1-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.808376 4877 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-8zv48\" (UniqueName: \"kubernetes.io/projected/48dd70f6-d859-4318-a5b8-57945d0c3dd1-kube-api-access-8zv48\") pod \"alertmanager-main-0\" (UID: \"48dd70f6-d859-4318-a5b8-57945d0c3dd1\") " pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.990219 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" event={"ID":"85db93af-62b2-41ea-9d7e-669118689f87","Type":"ContainerStarted","Data":"bc8ca12face027b421f871a7596ad4ee4071d118fc0198be718a2301a639fdac"} Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.993881 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" event={"ID":"d9e21b18-b3f6-41c5-927a-60793ad970e2","Type":"ContainerStarted","Data":"caccc3fd9e03ace91a12453594aef84c3fdfc6630afd640a6df50eaea7377858"} Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.993916 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" event={"ID":"d9e21b18-b3f6-41c5-927a-60793ad970e2","Type":"ContainerStarted","Data":"32745ef0dd1fe2a37cfbe76648fb55b862e3d2f80698798ef8b38c4b4e9ee0d7"} Jan 28 16:42:11 crc kubenswrapper[4877]: I0128 16:42:11.993926 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" event={"ID":"d9e21b18-b3f6-41c5-927a-60793ad970e2","Type":"ContainerStarted","Data":"20deb19858f61ec63a9a6bb70ed886a902be8fe105c11ff59932d750ad30d5bc"} Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.092166 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.329070 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"] Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.331748 4877 util.go:30] "No sandbox for pod can be found. 
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.336861 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-dockercfg-6qr9g"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.337746 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-tls"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.337848 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-metrics"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.337777 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-grpc-tls-fuffc5i1c110o"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.338001 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-rules"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.338908 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.340657 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-web"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.346425 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"]
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.481411 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.481550 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-grpc-tls\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.481579 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9tdx\" (UniqueName: \"kubernetes.io/projected/88f59584-7374-4487-a7ed-970ea8a838c0-kube-api-access-v9tdx\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.481610 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-thanos-querier-tls\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.481638 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.481684 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/88f59584-7374-4487-a7ed-970ea8a838c0-metrics-client-ca\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.481716 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.481748 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.582836 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-thanos-querier-tls\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.583217 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.583258 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/88f59584-7374-4487-a7ed-970ea8a838c0-metrics-client-ca\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.583286 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.583450 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.583532 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.583618 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-grpc-tls\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.583648 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9tdx\" (UniqueName: \"kubernetes.io/projected/88f59584-7374-4487-a7ed-970ea8a838c0-kube-api-access-v9tdx\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.585002 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/88f59584-7374-4487-a7ed-970ea8a838c0-metrics-client-ca\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.590623 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.591391 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-thanos-querier-tls\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.592195 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.592749 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.600680 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-grpc-tls\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.603576 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9tdx\" (UniqueName: \"kubernetes.io/projected/88f59584-7374-4487-a7ed-970ea8a838c0-kube-api-access-v9tdx\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.603993 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/88f59584-7374-4487-a7ed-970ea8a838c0-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-d6bfd8f44-zx8vx\" (UID: \"88f59584-7374-4487-a7ed-970ea8a838c0\") " pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:12 crc kubenswrapper[4877]: I0128 16:42:12.688979 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"
Jan 28 16:42:13 crc kubenswrapper[4877]: I0128 16:42:13.033814 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" event={"ID":"85db93af-62b2-41ea-9d7e-669118689f87","Type":"ContainerStarted","Data":"fa4d1bd5d242f65116af0246477a8864ff1985403e1b1b87bd37fd42029b4309"}
Jan 28 16:42:13 crc kubenswrapper[4877]: I0128 16:42:13.061372 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx"]
Jan 28 16:42:13 crc kubenswrapper[4877]: I0128 16:42:13.075620 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-t9zb7" event={"ID":"27428910-4c34-4ab0-8a04-37ccb49d6439","Type":"ContainerStarted","Data":"26109d9f1df6e34e7946b8dfb295fcc29d8442127c223066b66bd03b7b0da76b"}
Jan 28 16:42:13 crc kubenswrapper[4877]: I0128 16:42:13.293639 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"]
Jan 28 16:42:13 crc kubenswrapper[4877]: W0128 16:42:13.302610 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48dd70f6_d859_4318_a5b8_57945d0c3dd1.slice/crio-d35e51b5b5b1eb19661e48a46b81015bec4fdcac4a6a6358a3b4b532fe5aeea8 WatchSource:0}: Error finding container d35e51b5b5b1eb19661e48a46b81015bec4fdcac4a6a6358a3b4b532fe5aeea8: Status 404 returned error can't find the container with id d35e51b5b5b1eb19661e48a46b81015bec4fdcac4a6a6358a3b4b532fe5aeea8
Jan 28 16:42:14 crc kubenswrapper[4877]: I0128 16:42:14.085731 4877 generic.go:334] "Generic (PLEG): container finished" podID="27428910-4c34-4ab0-8a04-37ccb49d6439" containerID="26109d9f1df6e34e7946b8dfb295fcc29d8442127c223066b66bd03b7b0da76b" exitCode=0
Jan 28 16:42:14 crc kubenswrapper[4877]: I0128 16:42:14.085804 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-t9zb7" event={"ID":"27428910-4c34-4ab0-8a04-37ccb49d6439","Type":"ContainerDied","Data":"26109d9f1df6e34e7946b8dfb295fcc29d8442127c223066b66bd03b7b0da76b"}
Jan 28 16:42:14 crc kubenswrapper[4877]: I0128 16:42:14.089110 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx" event={"ID":"88f59584-7374-4487-a7ed-970ea8a838c0","Type":"ContainerStarted","Data":"6edb8383f507c99b4a8a8634d56f4f91d2bc3b814e3a3ff6cdb8de050e6ca94a"}
Jan 28 16:42:14 crc kubenswrapper[4877]: I0128 16:42:14.091514 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" event={"ID":"d9e21b18-b3f6-41c5-927a-60793ad970e2","Type":"ContainerStarted","Data":"fd1c311c67ce89efc442c7d29713720f83e8f5c17b871572f0852611c0787942"}
Jan 28 16:42:14 crc kubenswrapper[4877]: I0128 16:42:14.094569 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" event={"ID":"85db93af-62b2-41ea-9d7e-669118689f87","Type":"ContainerStarted","Data":"11ab27832c09cc8344781593e78ee58648aacca5670f3435c3991c55c67c3442"}
Jan 28 16:42:14 crc kubenswrapper[4877]: I0128 16:42:14.094630 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" event={"ID":"85db93af-62b2-41ea-9d7e-669118689f87","Type":"ContainerStarted","Data":"944ee9d7301b73a2cf2d4792da0a43ef0c5edf4c54e906e5bee75f69318bb253"}
Jan 28 16:42:14 crc kubenswrapper[4877]: I0128 16:42:14.095804 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"48dd70f6-d859-4318-a5b8-57945d0c3dd1","Type":"ContainerStarted","Data":"d35e51b5b5b1eb19661e48a46b81015bec4fdcac4a6a6358a3b4b532fe5aeea8"}
Jan 28 16:42:14 crc kubenswrapper[4877]: I0128 16:42:14.143340 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-k82m5" podStartSLOduration=2.51205886 podStartE2EDuration="4.143317476s" podCreationTimestamp="2026-01-28 16:42:10 +0000 UTC" firstStartedPulling="2026-01-28 16:42:11.123774604 +0000 UTC m=+434.682101512" lastFinishedPulling="2026-01-28 16:42:12.75503324 +0000 UTC m=+436.313360128" observedRunningTime="2026-01-28 16:42:14.140566986 +0000 UTC m=+437.698893914" watchObservedRunningTime="2026-01-28 16:42:14.143317476 +0000 UTC m=+437.701644384"
Jan 28 16:42:14 crc kubenswrapper[4877]: I0128 16:42:14.159171 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/openshift-state-metrics-566fddb674-b4qhm" podStartSLOduration=1.939377775 podStartE2EDuration="4.159151621s" podCreationTimestamp="2026-01-28 16:42:10 +0000 UTC" firstStartedPulling="2026-01-28 16:42:11.498396086 +0000 UTC m=+435.056722974" lastFinishedPulling="2026-01-28 16:42:13.718169932 +0000 UTC m=+437.276496820" observedRunningTime="2026-01-28 16:42:14.156628767 +0000 UTC m=+437.714955685" watchObservedRunningTime="2026-01-28 16:42:14.159151621 +0000 UTC m=+437.717478509"
Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.106182 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-t9zb7" event={"ID":"27428910-4c34-4ab0-8a04-37ccb49d6439","Type":"ContainerStarted","Data":"f2f499ac22e80c07df7d1cdbb52611736b86af7a037813f1b4715edd8938583a"}
Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.106661 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-t9zb7" event={"ID":"27428910-4c34-4ab0-8a04-37ccb49d6439","Type":"ContainerStarted","Data":"81de6102f87bf44dbfcb3d979ef9ac86e4bd459641713d1e527cfea4389579a1"}
Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.110084 4877 generic.go:334] "Generic (PLEG): container finished" podID="48dd70f6-d859-4318-a5b8-57945d0c3dd1" containerID="6d2e37955dec1074810682ecd1c5f5ca319cee02a2c3172ba6c93b33065ba650" exitCode=0
Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.110269 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"48dd70f6-d859-4318-a5b8-57945d0c3dd1","Type":"ContainerDied","Data":"6d2e37955dec1074810682ecd1c5f5ca319cee02a2c3172ba6c93b33065ba650"}
Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.139615 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/node-exporter-t9zb7" podStartSLOduration=3.174384095 podStartE2EDuration="5.13958897s" podCreationTimestamp="2026-01-28 16:42:10 +0000 UTC" firstStartedPulling="2026-01-28 16:42:10.780672916 +0000 UTC m=+434.338999804" lastFinishedPulling="2026-01-28 16:42:12.745877791 +0000 UTC m=+436.304204679" observedRunningTime="2026-01-28 16:42:15.138024824 +0000 UTC m=+438.696351712" watchObservedRunningTime="2026-01-28 16:42:15.13958897 +0000 UTC m=+438.697915858"
Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.139850 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-64cf654bdf-29w6x"]
Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.141108 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-64cf654bdf-29w6x"
Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.161974 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-64cf654bdf-29w6x"]
Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.339468 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/70728908-fa83-433c-9762-971e308ecd40-console-serving-cert\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x"
Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.339563 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/70728908-fa83-433c-9762-971e308ecd40-console-oauth-config\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x"
Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.339631 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-oauth-serving-cert\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x"
Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.339655 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-service-ca\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x"
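The pod_startup_latency_tracker entries above carry two durations whose relationship can be checked from the logged values themselves: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration appears to be that figure minus the image-pull window, with the pull window taken from the monotonic (m=+...) readings of firstStartedPulling and lastFinishedPulling. For kube-state-metrics-777cb5bd5d-k82m5: 4.143317476 - (436.313360128 - 434.682101512) = 2.51205886, matching the logged podStartSLOduration; the openshift-state-metrics entry checks out the same way. A short Go verification, with all values copied from the log:

// Re-deriving podStartSLOduration from the values logged above.
// The formula (E2E minus monotonic pull window) is inferred from the data,
// not quoted from kubelet source.
package main

import "fmt"

func main() {
	e2e := 4.143317476                    // podStartE2EDuration, seconds
	pull := 436.313360128 - 434.682101512 // lastFinishedPulling - firstStartedPulling (m=+... readings)
	fmt.Printf("podStartSLOduration = %.8f\n", e2e-pull) // prints 2.51205886
	// openshift-state-metrics: 4.159151621 - (437.276496820 - 435.056722974) = 1.939377775
}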
\"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.339681 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-console-config\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.339707 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49tpz\" (UniqueName: \"kubernetes.io/projected/70728908-fa83-433c-9762-971e308ecd40-kube-api-access-49tpz\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.339738 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-trusted-ca-bundle\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.441523 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-oauth-serving-cert\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.441601 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-service-ca\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.441647 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-console-config\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.441689 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49tpz\" (UniqueName: \"kubernetes.io/projected/70728908-fa83-433c-9762-971e308ecd40-kube-api-access-49tpz\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.441741 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-trusted-ca-bundle\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.441820 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/70728908-fa83-433c-9762-971e308ecd40-console-serving-cert\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.441847 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/70728908-fa83-433c-9762-971e308ecd40-console-oauth-config\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.443048 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-oauth-serving-cert\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.443722 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-service-ca\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.443744 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-console-config\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.445032 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-trusted-ca-bundle\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.458310 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/70728908-fa83-433c-9762-971e308ecd40-console-oauth-config\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.458397 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/70728908-fa83-433c-9762-971e308ecd40-console-serving-cert\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.473402 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49tpz\" (UniqueName: \"kubernetes.io/projected/70728908-fa83-433c-9762-971e308ecd40-kube-api-access-49tpz\") pod \"console-64cf654bdf-29w6x\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.711757 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/metrics-server-fbbd74554-qkt8l"] Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 
16:42:15.712813 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.727069 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-dockercfg-ldjd2" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.727141 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-server-audit-profiles" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.727424 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-tls" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.727707 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-d6g2imiac5784" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.727727 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kubelet-serving-ca-bundle" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.728021 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-client-certs" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.728157 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-fbbd74554-qkt8l"] Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.766134 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/7829fe04-318e-4cda-adb5-4109e6d6f751-metrics-server-audit-profiles\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.766267 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.766280 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/7829fe04-318e-4cda-adb5-4109e6d6f751-audit-log\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.766539 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7829fe04-318e-4cda-adb5-4109e6d6f751-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.767045 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/7829fe04-318e-4cda-adb5-4109e6d6f751-secret-metrics-client-certs\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.767108 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7829fe04-318e-4cda-adb5-4109e6d6f751-client-ca-bundle\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.768291 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/7829fe04-318e-4cda-adb5-4109e6d6f751-secret-metrics-server-tls\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.768341 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4cbw\" (UniqueName: \"kubernetes.io/projected/7829fe04-318e-4cda-adb5-4109e6d6f751-kube-api-access-p4cbw\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.870194 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/7829fe04-318e-4cda-adb5-4109e6d6f751-secret-metrics-server-tls\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.870243 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4cbw\" (UniqueName: \"kubernetes.io/projected/7829fe04-318e-4cda-adb5-4109e6d6f751-kube-api-access-p4cbw\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 
16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.870289 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/7829fe04-318e-4cda-adb5-4109e6d6f751-metrics-server-audit-profiles\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.870355 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/7829fe04-318e-4cda-adb5-4109e6d6f751-audit-log\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.870378 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7829fe04-318e-4cda-adb5-4109e6d6f751-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.870401 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/7829fe04-318e-4cda-adb5-4109e6d6f751-secret-metrics-client-certs\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.870440 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7829fe04-318e-4cda-adb5-4109e6d6f751-client-ca-bundle\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.871601 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/7829fe04-318e-4cda-adb5-4109e6d6f751-audit-log\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.873365 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7829fe04-318e-4cda-adb5-4109e6d6f751-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.873405 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/7829fe04-318e-4cda-adb5-4109e6d6f751-metrics-server-audit-profiles\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.876207 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-server-tls\" (UniqueName: 
\"kubernetes.io/secret/7829fe04-318e-4cda-adb5-4109e6d6f751-secret-metrics-server-tls\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.876671 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7829fe04-318e-4cda-adb5-4109e6d6f751-client-ca-bundle\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.886608 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/7829fe04-318e-4cda-adb5-4109e6d6f751-secret-metrics-client-certs\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:15 crc kubenswrapper[4877]: I0128 16:42:15.892067 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4cbw\" (UniqueName: \"kubernetes.io/projected/7829fe04-318e-4cda-adb5-4109e6d6f751-kube-api-access-p4cbw\") pod \"metrics-server-fbbd74554-qkt8l\" (UID: \"7829fe04-318e-4cda-adb5-4109e6d6f751\") " pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.014778 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-64cf654bdf-29w6x"] Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.063268 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.109912 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/monitoring-plugin-595f97fc4c-4kdp8"] Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.113339 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/monitoring-plugin-595f97fc4c-4kdp8" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.116190 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"monitoring-plugin-cert" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.116340 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"default-dockercfg-6tstp" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.122149 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx" event={"ID":"88f59584-7374-4487-a7ed-970ea8a838c0","Type":"ContainerStarted","Data":"21f99bdf95d3bcef7e4d24bc5abb9f63596076033a7c7d10ce1b23e66976a7c2"} Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.124390 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-64cf654bdf-29w6x" event={"ID":"70728908-fa83-433c-9762-971e308ecd40","Type":"ContainerStarted","Data":"8aca157eeb8c04d60677edf6066de911a51315de0d8dea91bef34e91e211efd1"} Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.136227 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-595f97fc4c-4kdp8"] Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.173266 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/e831d929-583c-4da5-8ab2-27d484da84b2-monitoring-plugin-cert\") pod \"monitoring-plugin-595f97fc4c-4kdp8\" (UID: \"e831d929-583c-4da5-8ab2-27d484da84b2\") " pod="openshift-monitoring/monitoring-plugin-595f97fc4c-4kdp8" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.275003 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/e831d929-583c-4da5-8ab2-27d484da84b2-monitoring-plugin-cert\") pod \"monitoring-plugin-595f97fc4c-4kdp8\" (UID: \"e831d929-583c-4da5-8ab2-27d484da84b2\") " pod="openshift-monitoring/monitoring-plugin-595f97fc4c-4kdp8" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.280680 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/e831d929-583c-4da5-8ab2-27d484da84b2-monitoring-plugin-cert\") pod \"monitoring-plugin-595f97fc4c-4kdp8\" (UID: \"e831d929-583c-4da5-8ab2-27d484da84b2\") " pod="openshift-monitoring/monitoring-plugin-595f97fc4c-4kdp8" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.456827 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/monitoring-plugin-595f97fc4c-4kdp8" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.540083 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-fbbd74554-qkt8l"] Jan 28 16:42:16 crc kubenswrapper[4877]: W0128 16:42:16.569434 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7829fe04_318e_4cda_adb5_4109e6d6f751.slice/crio-894106545eac2966d831d8478ff36dd42b80ded69b35d0fac4abf9cc9f72fe4c WatchSource:0}: Error finding container 894106545eac2966d831d8478ff36dd42b80ded69b35d0fac4abf9cc9f72fe4c: Status 404 returned error can't find the container with id 894106545eac2966d831d8478ff36dd42b80ded69b35d0fac4abf9cc9f72fe4c Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.727545 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.733033 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.739978 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-web-config" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.740407 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-prometheus-http-client-file" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.740641 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-rbac-proxy" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.740769 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-kube-rbac-proxy-web" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.740979 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.741102 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-sidecar-tls" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.741215 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-grpc-tls-c7uaeihss5ej7" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.741320 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls-assets-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.742469 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"serving-certs-ca-bundle" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.742823 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-dockercfg-qlj5j" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.742955 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.748867 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-k8s-rulefiles-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.755381 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Jan 28 16:42:16 crc 
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.786136 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.786207 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbjxk\" (UniqueName: \"kubernetes.io/projected/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-kube-api-access-pbjxk\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.786255 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.786285 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.786309 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.786336 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.786354 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-config\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.786377 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.786627 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-config-out\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.786712 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.786769 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.786988 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-web-config\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.787013 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.787069 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.787114 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.787213 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0"
Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.787262 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-configmap-serving-certs-ca-bundle\") pod 
\"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.787301 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888004 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-config\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888068 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888096 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-config-out\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888120 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888142 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888156 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-web-config\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888171 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888227 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: 
\"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888284 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888310 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888333 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888349 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888379 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888409 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbjxk\" (UniqueName: \"kubernetes.io/projected/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-kube-api-access-pbjxk\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888442 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888461 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888494 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-prometheus-k8s-db\") pod 
\"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.888517 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.892747 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.893370 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-595f97fc4c-4kdp8"] Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.895284 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.895345 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.896242 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.897892 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.900125 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.901093 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.902697 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" 
(UniqueName: \"kubernetes.io/empty-dir/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-config-out\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.902847 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.903078 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.903872 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.905949 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: W0128 16:42:16.906792 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode831d929_583c_4da5_8ab2_27d484da84b2.slice/crio-8a921a408aa1b7335cb522a83caed8e29a94549b2c0ca0b7294881c7791537a8 WatchSource:0}: Error finding container 8a921a408aa1b7335cb522a83caed8e29a94549b2c0ca0b7294881c7791537a8: Status 404 returned error can't find the container with id 8a921a408aa1b7335cb522a83caed8e29a94549b2c0ca0b7294881c7791537a8 Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.909185 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.909236 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.912604 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-config\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.913539 4877 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.913835 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbjxk\" (UniqueName: \"kubernetes.io/projected/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-kube-api-access-pbjxk\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:16 crc kubenswrapper[4877]: I0128 16:42:16.915007 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/5fbd26ca-eb13-4e63-b055-3ee514dbcea6-web-config\") pod \"prometheus-k8s-0\" (UID: \"5fbd26ca-eb13-4e63-b055-3ee514dbcea6\") " pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:17 crc kubenswrapper[4877]: I0128 16:42:17.101821 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:17 crc kubenswrapper[4877]: I0128 16:42:17.131980 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" event={"ID":"7829fe04-318e-4cda-adb5-4109e6d6f751","Type":"ContainerStarted","Data":"894106545eac2966d831d8478ff36dd42b80ded69b35d0fac4abf9cc9f72fe4c"} Jan 28 16:42:17 crc kubenswrapper[4877]: I0128 16:42:17.134678 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx" event={"ID":"88f59584-7374-4487-a7ed-970ea8a838c0","Type":"ContainerStarted","Data":"b8e7c0eefa752af475269f45b64b671b98e70344967da18e182f6141b11f938b"} Jan 28 16:42:17 crc kubenswrapper[4877]: I0128 16:42:17.134773 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx" event={"ID":"88f59584-7374-4487-a7ed-970ea8a838c0","Type":"ContainerStarted","Data":"06059b9d8d242b68eec7efc751d94e857ff9615ffb066b75de659e9c4bf4a095"} Jan 28 16:42:17 crc kubenswrapper[4877]: I0128 16:42:17.137228 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-595f97fc4c-4kdp8" event={"ID":"e831d929-583c-4da5-8ab2-27d484da84b2","Type":"ContainerStarted","Data":"8a921a408aa1b7335cb522a83caed8e29a94549b2c0ca0b7294881c7791537a8"} Jan 28 16:42:17 crc kubenswrapper[4877]: I0128 16:42:17.139186 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-64cf654bdf-29w6x" event={"ID":"70728908-fa83-433c-9762-971e308ecd40","Type":"ContainerStarted","Data":"c1d608aaba8e0ab032b67d6e6db5e8525b062fe941398c861047ec177d24f558"} Jan 28 16:42:17 crc kubenswrapper[4877]: I0128 16:42:17.170074 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-64cf654bdf-29w6x" podStartSLOduration=2.170050399 podStartE2EDuration="2.170050399s" podCreationTimestamp="2026-01-28 16:42:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:42:17.164808955 +0000 UTC m=+440.723135883" watchObservedRunningTime="2026-01-28 16:42:17.170050399 +0000 UTC m=+440.728377287" Jan 28 16:42:18 crc kubenswrapper[4877]: I0128 16:42:18.219601 4877 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Jan 28 16:42:19 crc kubenswrapper[4877]: I0128 16:42:19.172123 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx" event={"ID":"88f59584-7374-4487-a7ed-970ea8a838c0","Type":"ContainerStarted","Data":"cac1b9a4903e8f06f78ac0b5f8de51cb684e60c25a90235df64f9a4773e86f4b"} Jan 28 16:42:19 crc kubenswrapper[4877]: I0128 16:42:19.172867 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx" Jan 28 16:42:19 crc kubenswrapper[4877]: I0128 16:42:19.172888 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx" event={"ID":"88f59584-7374-4487-a7ed-970ea8a838c0","Type":"ContainerStarted","Data":"0006d84a537bb9fb60bab9ab3cb1d24d0dc911cfd8bf1a202221e8ba0770f0d8"} Jan 28 16:42:19 crc kubenswrapper[4877]: I0128 16:42:19.172901 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx" event={"ID":"88f59584-7374-4487-a7ed-970ea8a838c0","Type":"ContainerStarted","Data":"875b5b44a81b9e2c713f66217a5ebe75cff6864fbfe6ba4767593752f9c80559"} Jan 28 16:42:19 crc kubenswrapper[4877]: I0128 16:42:19.176763 4877 generic.go:334] "Generic (PLEG): container finished" podID="5fbd26ca-eb13-4e63-b055-3ee514dbcea6" containerID="8199fe31f13bbaf0532dfb8846c01bf0e8fdb583c8892b15f9ed1abf5e4b70f4" exitCode=0 Jan 28 16:42:19 crc kubenswrapper[4877]: I0128 16:42:19.177002 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"5fbd26ca-eb13-4e63-b055-3ee514dbcea6","Type":"ContainerDied","Data":"8199fe31f13bbaf0532dfb8846c01bf0e8fdb583c8892b15f9ed1abf5e4b70f4"} Jan 28 16:42:19 crc kubenswrapper[4877]: I0128 16:42:19.177058 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"5fbd26ca-eb13-4e63-b055-3ee514dbcea6","Type":"ContainerStarted","Data":"f6cea5f918738b9d41fce7137432ecff6a2ce1d81a9762bad1bc08eb8b8daf7a"} Jan 28 16:42:19 crc kubenswrapper[4877]: I0128 16:42:19.185832 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"48dd70f6-d859-4318-a5b8-57945d0c3dd1","Type":"ContainerStarted","Data":"dad3544eef77bb9c6958059409000782daa32ba90d6b80a77ad317e4587e036e"} Jan 28 16:42:19 crc kubenswrapper[4877]: I0128 16:42:19.185888 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"48dd70f6-d859-4318-a5b8-57945d0c3dd1","Type":"ContainerStarted","Data":"e5e5bd7ff397de3eaf2bf5703a8aaa6e26558cb5332298bcd1728c6ede5cd038"} Jan 28 16:42:19 crc kubenswrapper[4877]: I0128 16:42:19.185905 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"48dd70f6-d859-4318-a5b8-57945d0c3dd1","Type":"ContainerStarted","Data":"3d3ea2d4db4cb0949c24a2383e0318201f1b2d4f40c11a8ccf2da6917d4a95c7"} Jan 28 16:42:19 crc kubenswrapper[4877]: I0128 16:42:19.185918 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"48dd70f6-d859-4318-a5b8-57945d0c3dd1","Type":"ContainerStarted","Data":"464eac6d8e93bc2f0dec636eea6694d06e2678746ad45158fcbb543e802b6683"} Jan 28 16:42:19 crc kubenswrapper[4877]: I0128 16:42:19.205145 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx" podStartSLOduration=2.303213548 podStartE2EDuration="7.205126835s" podCreationTimestamp="2026-01-28 16:42:12 +0000 UTC" firstStartedPulling="2026-01-28 16:42:13.03995355 +0000 UTC m=+436.598280438" lastFinishedPulling="2026-01-28 16:42:17.941866837 +0000 UTC m=+441.500193725" observedRunningTime="2026-01-28 16:42:19.202704654 +0000 UTC m=+442.761031552" watchObservedRunningTime="2026-01-28 16:42:19.205126835 +0000 UTC m=+442.763453733" Jan 28 16:42:20 crc kubenswrapper[4877]: I0128 16:42:20.197085 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" event={"ID":"7829fe04-318e-4cda-adb5-4109e6d6f751","Type":"ContainerStarted","Data":"37722aa48435b88a0829b19a19182148c36d2f40151c17a01240122654767edf"} Jan 28 16:42:20 crc kubenswrapper[4877]: I0128 16:42:20.205237 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"48dd70f6-d859-4318-a5b8-57945d0c3dd1","Type":"ContainerStarted","Data":"8596352b16323723215fe2482c28f16edda5f979e286ec19cc3c150fe6a8dd07"} Jan 28 16:42:20 crc kubenswrapper[4877]: I0128 16:42:20.205288 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"48dd70f6-d859-4318-a5b8-57945d0c3dd1","Type":"ContainerStarted","Data":"2cc84b57705c6aadbbb3df8e83e0c4bb9110d7945193feed6c86eb2c21dd62b1"} Jan 28 16:42:20 crc kubenswrapper[4877]: I0128 16:42:20.207865 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-595f97fc4c-4kdp8" event={"ID":"e831d929-583c-4da5-8ab2-27d484da84b2","Type":"ContainerStarted","Data":"36300e99ae4f16937d5c14ec33ef233485d57719df9ac85c976d996cd7bd1f2a"} Jan 28 16:42:20 crc kubenswrapper[4877]: I0128 16:42:20.221867 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" podStartSLOduration=2.249396238 podStartE2EDuration="5.221839578s" podCreationTimestamp="2026-01-28 16:42:15 +0000 UTC" firstStartedPulling="2026-01-28 16:42:16.572867437 +0000 UTC m=+440.131194325" lastFinishedPulling="2026-01-28 16:42:19.545310777 +0000 UTC m=+443.103637665" observedRunningTime="2026-01-28 16:42:20.220410196 +0000 UTC m=+443.778737134" watchObservedRunningTime="2026-01-28 16:42:20.221839578 +0000 UTC m=+443.780166466" Jan 28 16:42:20 crc kubenswrapper[4877]: I0128 16:42:20.241764 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/monitoring-plugin-595f97fc4c-4kdp8" podStartSLOduration=1.606513447 podStartE2EDuration="4.241710351s" podCreationTimestamp="2026-01-28 16:42:16 +0000 UTC" firstStartedPulling="2026-01-28 16:42:16.909833564 +0000 UTC m=+440.468160452" lastFinishedPulling="2026-01-28 16:42:19.545030468 +0000 UTC m=+443.103357356" observedRunningTime="2026-01-28 16:42:20.2362102 +0000 UTC m=+443.794537088" watchObservedRunningTime="2026-01-28 16:42:20.241710351 +0000 UTC m=+443.800037249" Jan 28 16:42:20 crc kubenswrapper[4877]: I0128 16:42:20.296557 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/alertmanager-main-0" podStartSLOduration=4.662822081 podStartE2EDuration="9.296533249s" podCreationTimestamp="2026-01-28 16:42:11 +0000 UTC" firstStartedPulling="2026-01-28 16:42:13.305682987 +0000 UTC m=+436.864009875" lastFinishedPulling="2026-01-28 16:42:17.939394155 +0000 UTC m=+441.497721043" 
observedRunningTime="2026-01-28 16:42:20.293783049 +0000 UTC m=+443.852109947" watchObservedRunningTime="2026-01-28 16:42:20.296533249 +0000 UTC m=+443.854860137" Jan 28 16:42:21 crc kubenswrapper[4877]: I0128 16:42:21.218589 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/monitoring-plugin-595f97fc4c-4kdp8" Jan 28 16:42:21 crc kubenswrapper[4877]: I0128 16:42:21.229087 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/monitoring-plugin-595f97fc4c-4kdp8" Jan 28 16:42:22 crc kubenswrapper[4877]: I0128 16:42:22.703655 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx" Jan 28 16:42:23 crc kubenswrapper[4877]: I0128 16:42:23.238133 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"5fbd26ca-eb13-4e63-b055-3ee514dbcea6","Type":"ContainerStarted","Data":"59ae3696c5c2b8927388080316f37877545613b98421210204b986223e72b046"} Jan 28 16:42:24 crc kubenswrapper[4877]: I0128 16:42:24.258303 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"5fbd26ca-eb13-4e63-b055-3ee514dbcea6","Type":"ContainerStarted","Data":"f3002d719612e616c3d93180d30d3ba0fba620384944db2e29d2c364d0d8e20c"} Jan 28 16:42:24 crc kubenswrapper[4877]: I0128 16:42:24.260036 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"5fbd26ca-eb13-4e63-b055-3ee514dbcea6","Type":"ContainerStarted","Data":"2ed1d44ff256712335cc52e8a2d42f6110c55e38ae929fcef2f5102f0deb4061"} Jan 28 16:42:24 crc kubenswrapper[4877]: I0128 16:42:24.260141 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"5fbd26ca-eb13-4e63-b055-3ee514dbcea6","Type":"ContainerStarted","Data":"7cd9c7ba3524d16bb9650a81bce2f806fa1c5586f67ec0b5b3b2da57e8075692"} Jan 28 16:42:24 crc kubenswrapper[4877]: I0128 16:42:24.260211 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"5fbd26ca-eb13-4e63-b055-3ee514dbcea6","Type":"ContainerStarted","Data":"1816865489af3fd979ded4103c5ef6faa2c91815fb29a87108abc39ccbf4313d"} Jan 28 16:42:24 crc kubenswrapper[4877]: I0128 16:42:24.260274 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"5fbd26ca-eb13-4e63-b055-3ee514dbcea6","Type":"ContainerStarted","Data":"849bf479505f53a22991b6a5e299beb162ef81a5e759db4c899771b5b0d52e8f"} Jan 28 16:42:24 crc kubenswrapper[4877]: I0128 16:42:24.309999 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-k8s-0" podStartSLOduration=4.597866993 podStartE2EDuration="8.309978176s" podCreationTimestamp="2026-01-28 16:42:16 +0000 UTC" firstStartedPulling="2026-01-28 16:42:19.178321059 +0000 UTC m=+442.736647987" lastFinishedPulling="2026-01-28 16:42:22.890432242 +0000 UTC m=+446.448759170" observedRunningTime="2026-01-28 16:42:24.3033205 +0000 UTC m=+447.861647388" watchObservedRunningTime="2026-01-28 16:42:24.309978176 +0000 UTC m=+447.868305064" Jan 28 16:42:25 crc kubenswrapper[4877]: I0128 16:42:25.766660 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:25 crc kubenswrapper[4877]: I0128 16:42:25.766780 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:25 crc kubenswrapper[4877]: I0128 16:42:25.774581 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:26 crc kubenswrapper[4877]: I0128 16:42:26.284970 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:42:26 crc kubenswrapper[4877]: I0128 16:42:26.399434 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-f5npr"] Jan 28 16:42:27 crc kubenswrapper[4877]: I0128 16:42:27.103689 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:42:36 crc kubenswrapper[4877]: I0128 16:42:36.064911 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:36 crc kubenswrapper[4877]: I0128 16:42:36.065928 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:51 crc kubenswrapper[4877]: I0128 16:42:51.468306 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-f5npr" podUID="ae95a71e-8f5b-45ac-b6e7-a78e2258de80" containerName="console" containerID="cri-o://c3b25d06ca0cfccef8f144d713dae6bbf2af79aa1609a28c393cfae9cc22dceb" gracePeriod=15 Jan 28 16:42:51 crc kubenswrapper[4877]: I0128 16:42:51.924994 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-f5npr_ae95a71e-8f5b-45ac-b6e7-a78e2258de80/console/0.log" Jan 28 16:42:51 crc kubenswrapper[4877]: I0128 16:42:51.925838 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-f5npr" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.075801 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-config\") pod \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.076481 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-serving-cert\") pod \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.076894 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvrf5\" (UniqueName: \"kubernetes.io/projected/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-kube-api-access-bvrf5\") pod \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.077142 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-service-ca\") pod \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.077380 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-oauth-config\") pod \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.077612 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-trusted-ca-bundle\") pod \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.077210 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-config" (OuterVolumeSpecName: "console-config") pod "ae95a71e-8f5b-45ac-b6e7-a78e2258de80" (UID: "ae95a71e-8f5b-45ac-b6e7-a78e2258de80"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.077853 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-service-ca" (OuterVolumeSpecName: "service-ca") pod "ae95a71e-8f5b-45ac-b6e7-a78e2258de80" (UID: "ae95a71e-8f5b-45ac-b6e7-a78e2258de80"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.077800 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-oauth-serving-cert\") pod \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\" (UID: \"ae95a71e-8f5b-45ac-b6e7-a78e2258de80\") " Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.078489 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "ae95a71e-8f5b-45ac-b6e7-a78e2258de80" (UID: "ae95a71e-8f5b-45ac-b6e7-a78e2258de80"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.079115 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "ae95a71e-8f5b-45ac-b6e7-a78e2258de80" (UID: "ae95a71e-8f5b-45ac-b6e7-a78e2258de80"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.079574 4877 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.079619 4877 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.079638 4877 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.079659 4877 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.086250 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "ae95a71e-8f5b-45ac-b6e7-a78e2258de80" (UID: "ae95a71e-8f5b-45ac-b6e7-a78e2258de80"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.086365 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-kube-api-access-bvrf5" (OuterVolumeSpecName: "kube-api-access-bvrf5") pod "ae95a71e-8f5b-45ac-b6e7-a78e2258de80" (UID: "ae95a71e-8f5b-45ac-b6e7-a78e2258de80"). InnerVolumeSpecName "kube-api-access-bvrf5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.088297 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "ae95a71e-8f5b-45ac-b6e7-a78e2258de80" (UID: "ae95a71e-8f5b-45ac-b6e7-a78e2258de80"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.181707 4877 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.181795 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvrf5\" (UniqueName: \"kubernetes.io/projected/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-kube-api-access-bvrf5\") on node \"crc\" DevicePath \"\"" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.181828 4877 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ae95a71e-8f5b-45ac-b6e7-a78e2258de80-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.531749 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-f5npr_ae95a71e-8f5b-45ac-b6e7-a78e2258de80/console/0.log" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.531877 4877 generic.go:334] "Generic (PLEG): container finished" podID="ae95a71e-8f5b-45ac-b6e7-a78e2258de80" containerID="c3b25d06ca0cfccef8f144d713dae6bbf2af79aa1609a28c393cfae9cc22dceb" exitCode=2 Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.531936 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-f5npr" event={"ID":"ae95a71e-8f5b-45ac-b6e7-a78e2258de80","Type":"ContainerDied","Data":"c3b25d06ca0cfccef8f144d713dae6bbf2af79aa1609a28c393cfae9cc22dceb"} Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.532000 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-f5npr" event={"ID":"ae95a71e-8f5b-45ac-b6e7-a78e2258de80","Type":"ContainerDied","Data":"14ceb84210e6231dcbef2eb43194699995bec5901d2f29a0976583c0fc6c0d62"} Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.532015 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-f5npr" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.532024 4877 scope.go:117] "RemoveContainer" containerID="c3b25d06ca0cfccef8f144d713dae6bbf2af79aa1609a28c393cfae9cc22dceb" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.562308 4877 scope.go:117] "RemoveContainer" containerID="c3b25d06ca0cfccef8f144d713dae6bbf2af79aa1609a28c393cfae9cc22dceb" Jan 28 16:42:52 crc kubenswrapper[4877]: E0128 16:42:52.563165 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3b25d06ca0cfccef8f144d713dae6bbf2af79aa1609a28c393cfae9cc22dceb\": container with ID starting with c3b25d06ca0cfccef8f144d713dae6bbf2af79aa1609a28c393cfae9cc22dceb not found: ID does not exist" containerID="c3b25d06ca0cfccef8f144d713dae6bbf2af79aa1609a28c393cfae9cc22dceb" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.563215 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3b25d06ca0cfccef8f144d713dae6bbf2af79aa1609a28c393cfae9cc22dceb"} err="failed to get container status \"c3b25d06ca0cfccef8f144d713dae6bbf2af79aa1609a28c393cfae9cc22dceb\": rpc error: code = NotFound desc = could not find container \"c3b25d06ca0cfccef8f144d713dae6bbf2af79aa1609a28c393cfae9cc22dceb\": container with ID starting with c3b25d06ca0cfccef8f144d713dae6bbf2af79aa1609a28c393cfae9cc22dceb not found: ID does not exist" Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.589682 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-f5npr"] Jan 28 16:42:52 crc kubenswrapper[4877]: I0128 16:42:52.597454 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-f5npr"] Jan 28 16:42:53 crc kubenswrapper[4877]: I0128 16:42:53.351712 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae95a71e-8f5b-45ac-b6e7-a78e2258de80" path="/var/lib/kubelet/pods/ae95a71e-8f5b-45ac-b6e7-a78e2258de80/volumes" Jan 28 16:42:56 crc kubenswrapper[4877]: I0128 16:42:56.075527 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:42:56 crc kubenswrapper[4877]: I0128 16:42:56.083794 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" Jan 28 16:43:17 crc kubenswrapper[4877]: I0128 16:43:17.103214 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:43:17 crc kubenswrapper[4877]: I0128 16:43:17.160051 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:43:18 crc kubenswrapper[4877]: I0128 16:43:17.779384 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-k8s-0" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.426934 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-5df55ff997-m9q7p"] Jan 28 16:43:41 crc kubenswrapper[4877]: E0128 16:43:41.428307 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae95a71e-8f5b-45ac-b6e7-a78e2258de80" containerName="console" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.428334 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae95a71e-8f5b-45ac-b6e7-a78e2258de80" containerName="console" 
Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.428782 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae95a71e-8f5b-45ac-b6e7-a78e2258de80" containerName="console" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.430859 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.438802 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5df55ff997-m9q7p"] Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.558744 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-service-ca\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.559375 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-oauth-config\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.559405 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-serving-cert\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.559425 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkvzw\" (UniqueName: \"kubernetes.io/projected/e6b64fae-3c03-438f-a673-28a924ce0f6d-kube-api-access-kkvzw\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.559458 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-config\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.559508 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-oauth-serving-cert\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.559870 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-trusted-ca-bundle\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.662081 4877 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-service-ca\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.662148 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-oauth-config\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.662171 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-serving-cert\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.662192 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkvzw\" (UniqueName: \"kubernetes.io/projected/e6b64fae-3c03-438f-a673-28a924ce0f6d-kube-api-access-kkvzw\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.662229 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-config\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.662253 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-oauth-serving-cert\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.662306 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-trusted-ca-bundle\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.663727 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-service-ca\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.663966 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-oauth-serving-cert\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.664309 4877 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-trusted-ca-bundle\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.664600 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-config\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.670221 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-oauth-config\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.675385 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-serving-cert\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.701940 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkvzw\" (UniqueName: \"kubernetes.io/projected/e6b64fae-3c03-438f-a673-28a924ce0f6d-kube-api-access-kkvzw\") pod \"console-5df55ff997-m9q7p\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:41 crc kubenswrapper[4877]: I0128 16:43:41.762216 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:42 crc kubenswrapper[4877]: I0128 16:43:42.245301 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5df55ff997-m9q7p"] Jan 28 16:43:42 crc kubenswrapper[4877]: I0128 16:43:42.935428 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5df55ff997-m9q7p" event={"ID":"e6b64fae-3c03-438f-a673-28a924ce0f6d","Type":"ContainerStarted","Data":"3828c43525f16efed913d692131b709adf9bc1f1ed300a6475030c1e08ae8751"} Jan 28 16:43:42 crc kubenswrapper[4877]: I0128 16:43:42.936034 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5df55ff997-m9q7p" event={"ID":"e6b64fae-3c03-438f-a673-28a924ce0f6d","Type":"ContainerStarted","Data":"91c4668587a88550a83b91f017bd67d1e443bb46916b7116cdd9f182dd7893b1"} Jan 28 16:43:42 crc kubenswrapper[4877]: I0128 16:43:42.967713 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-5df55ff997-m9q7p" podStartSLOduration=1.967679865 podStartE2EDuration="1.967679865s" podCreationTimestamp="2026-01-28 16:43:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:43:42.962846311 +0000 UTC m=+526.521173199" watchObservedRunningTime="2026-01-28 16:43:42.967679865 +0000 UTC m=+526.526006753" Jan 28 16:43:51 crc kubenswrapper[4877]: I0128 16:43:51.763559 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:51 crc kubenswrapper[4877]: I0128 16:43:51.764198 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:51 crc kubenswrapper[4877]: I0128 16:43:51.771576 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:52 crc kubenswrapper[4877]: I0128 16:43:52.019317 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:43:52 crc kubenswrapper[4877]: I0128 16:43:52.156738 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-64cf654bdf-29w6x"] Jan 28 16:44:07 crc kubenswrapper[4877]: I0128 16:44:07.077087 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:44:07 crc kubenswrapper[4877]: I0128 16:44:07.077759 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:44:17 crc kubenswrapper[4877]: I0128 16:44:17.202761 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-64cf654bdf-29w6x" podUID="70728908-fa83-433c-9762-971e308ecd40" containerName="console" containerID="cri-o://c1d608aaba8e0ab032b67d6e6db5e8525b062fe941398c861047ec177d24f558" gracePeriod=15 Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.144534 4877 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-console_console-64cf654bdf-29w6x_70728908-fa83-433c-9762-971e308ecd40/console/0.log" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.145008 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.214300 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-49tpz\" (UniqueName: \"kubernetes.io/projected/70728908-fa83-433c-9762-971e308ecd40-kube-api-access-49tpz\") pod \"70728908-fa83-433c-9762-971e308ecd40\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.214439 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-service-ca\") pod \"70728908-fa83-433c-9762-971e308ecd40\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.214504 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-console-config\") pod \"70728908-fa83-433c-9762-971e308ecd40\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.214628 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/70728908-fa83-433c-9762-971e308ecd40-console-serving-cert\") pod \"70728908-fa83-433c-9762-971e308ecd40\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.214701 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-oauth-serving-cert\") pod \"70728908-fa83-433c-9762-971e308ecd40\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.214736 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-trusted-ca-bundle\") pod \"70728908-fa83-433c-9762-971e308ecd40\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.214803 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/70728908-fa83-433c-9762-971e308ecd40-console-oauth-config\") pod \"70728908-fa83-433c-9762-971e308ecd40\" (UID: \"70728908-fa83-433c-9762-971e308ecd40\") " Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.217203 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-console-config" (OuterVolumeSpecName: "console-config") pod "70728908-fa83-433c-9762-971e308ecd40" (UID: "70728908-fa83-433c-9762-971e308ecd40"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.217200 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "70728908-fa83-433c-9762-971e308ecd40" (UID: "70728908-fa83-433c-9762-971e308ecd40"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.217329 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-service-ca" (OuterVolumeSpecName: "service-ca") pod "70728908-fa83-433c-9762-971e308ecd40" (UID: "70728908-fa83-433c-9762-971e308ecd40"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.217456 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "70728908-fa83-433c-9762-971e308ecd40" (UID: "70728908-fa83-433c-9762-971e308ecd40"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.222586 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70728908-fa83-433c-9762-971e308ecd40-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "70728908-fa83-433c-9762-971e308ecd40" (UID: "70728908-fa83-433c-9762-971e308ecd40"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.223821 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70728908-fa83-433c-9762-971e308ecd40-kube-api-access-49tpz" (OuterVolumeSpecName: "kube-api-access-49tpz") pod "70728908-fa83-433c-9762-971e308ecd40" (UID: "70728908-fa83-433c-9762-971e308ecd40"). InnerVolumeSpecName "kube-api-access-49tpz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.225949 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70728908-fa83-433c-9762-971e308ecd40-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "70728908-fa83-433c-9762-971e308ecd40" (UID: "70728908-fa83-433c-9762-971e308ecd40"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.241855 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-64cf654bdf-29w6x_70728908-fa83-433c-9762-971e308ecd40/console/0.log" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.241954 4877 generic.go:334] "Generic (PLEG): container finished" podID="70728908-fa83-433c-9762-971e308ecd40" containerID="c1d608aaba8e0ab032b67d6e6db5e8525b062fe941398c861047ec177d24f558" exitCode=2 Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.242021 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-64cf654bdf-29w6x" event={"ID":"70728908-fa83-433c-9762-971e308ecd40","Type":"ContainerDied","Data":"c1d608aaba8e0ab032b67d6e6db5e8525b062fe941398c861047ec177d24f558"} Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.242068 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-64cf654bdf-29w6x" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.242116 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-64cf654bdf-29w6x" event={"ID":"70728908-fa83-433c-9762-971e308ecd40","Type":"ContainerDied","Data":"8aca157eeb8c04d60677edf6066de911a51315de0d8dea91bef34e91e211efd1"} Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.242149 4877 scope.go:117] "RemoveContainer" containerID="c1d608aaba8e0ab032b67d6e6db5e8525b062fe941398c861047ec177d24f558" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.299335 4877 scope.go:117] "RemoveContainer" containerID="c1d608aaba8e0ab032b67d6e6db5e8525b062fe941398c861047ec177d24f558" Jan 28 16:44:18 crc kubenswrapper[4877]: E0128 16:44:18.302757 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1d608aaba8e0ab032b67d6e6db5e8525b062fe941398c861047ec177d24f558\": container with ID starting with c1d608aaba8e0ab032b67d6e6db5e8525b062fe941398c861047ec177d24f558 not found: ID does not exist" containerID="c1d608aaba8e0ab032b67d6e6db5e8525b062fe941398c861047ec177d24f558" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.302828 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1d608aaba8e0ab032b67d6e6db5e8525b062fe941398c861047ec177d24f558"} err="failed to get container status \"c1d608aaba8e0ab032b67d6e6db5e8525b062fe941398c861047ec177d24f558\": rpc error: code = NotFound desc = could not find container \"c1d608aaba8e0ab032b67d6e6db5e8525b062fe941398c861047ec177d24f558\": container with ID starting with c1d608aaba8e0ab032b67d6e6db5e8525b062fe941398c861047ec177d24f558 not found: ID does not exist" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.305277 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-64cf654bdf-29w6x"] Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.310688 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-64cf654bdf-29w6x"] Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.316827 4877 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.316871 4877 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-console-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.316895 4877 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/70728908-fa83-433c-9762-971e308ecd40-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.316918 4877 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.316937 4877 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/70728908-fa83-433c-9762-971e308ecd40-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.316956 4877 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/70728908-fa83-433c-9762-971e308ecd40-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:44:18 crc kubenswrapper[4877]: I0128 16:44:18.316975 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-49tpz\" (UniqueName: \"kubernetes.io/projected/70728908-fa83-433c-9762-971e308ecd40-kube-api-access-49tpz\") on node \"crc\" DevicePath \"\"" Jan 28 16:44:19 crc kubenswrapper[4877]: I0128 16:44:19.343120 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70728908-fa83-433c-9762-971e308ecd40" path="/var/lib/kubelet/pods/70728908-fa83-433c-9762-971e308ecd40/volumes" Jan 28 16:44:37 crc kubenswrapper[4877]: I0128 16:44:37.076230 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:44:37 crc kubenswrapper[4877]: I0128 16:44:37.076925 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.208570 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd"] Jan 28 16:45:00 crc kubenswrapper[4877]: E0128 16:45:00.209742 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70728908-fa83-433c-9762-971e308ecd40" containerName="console" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.209759 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="70728908-fa83-433c-9762-971e308ecd40" containerName="console" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.209929 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="70728908-fa83-433c-9762-971e308ecd40" containerName="console" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.210499 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.213959 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.214895 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.226767 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd"] Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.389087 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prb6d\" (UniqueName: \"kubernetes.io/projected/a605e496-3f0b-4d5d-869e-f39742045553-kube-api-access-prb6d\") pod \"collect-profiles-29493645-pn7wd\" (UID: \"a605e496-3f0b-4d5d-869e-f39742045553\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.389388 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a605e496-3f0b-4d5d-869e-f39742045553-secret-volume\") pod \"collect-profiles-29493645-pn7wd\" (UID: \"a605e496-3f0b-4d5d-869e-f39742045553\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.390636 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a605e496-3f0b-4d5d-869e-f39742045553-config-volume\") pod \"collect-profiles-29493645-pn7wd\" (UID: \"a605e496-3f0b-4d5d-869e-f39742045553\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.492886 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a605e496-3f0b-4d5d-869e-f39742045553-secret-volume\") pod \"collect-profiles-29493645-pn7wd\" (UID: \"a605e496-3f0b-4d5d-869e-f39742045553\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.492975 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a605e496-3f0b-4d5d-869e-f39742045553-config-volume\") pod \"collect-profiles-29493645-pn7wd\" (UID: \"a605e496-3f0b-4d5d-869e-f39742045553\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.493147 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prb6d\" (UniqueName: \"kubernetes.io/projected/a605e496-3f0b-4d5d-869e-f39742045553-kube-api-access-prb6d\") pod \"collect-profiles-29493645-pn7wd\" (UID: \"a605e496-3f0b-4d5d-869e-f39742045553\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.495104 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a605e496-3f0b-4d5d-869e-f39742045553-config-volume\") pod 
\"collect-profiles-29493645-pn7wd\" (UID: \"a605e496-3f0b-4d5d-869e-f39742045553\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.505937 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a605e496-3f0b-4d5d-869e-f39742045553-secret-volume\") pod \"collect-profiles-29493645-pn7wd\" (UID: \"a605e496-3f0b-4d5d-869e-f39742045553\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.527303 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prb6d\" (UniqueName: \"kubernetes.io/projected/a605e496-3f0b-4d5d-869e-f39742045553-kube-api-access-prb6d\") pod \"collect-profiles-29493645-pn7wd\" (UID: \"a605e496-3f0b-4d5d-869e-f39742045553\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.536892 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd" Jan 28 16:45:00 crc kubenswrapper[4877]: I0128 16:45:00.787697 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd"] Jan 28 16:45:01 crc kubenswrapper[4877]: I0128 16:45:01.615373 4877 generic.go:334] "Generic (PLEG): container finished" podID="a605e496-3f0b-4d5d-869e-f39742045553" containerID="cb6d6e18484c430d6b4740a6e4ad872ab74d6e3be62cec894554db87c05e231d" exitCode=0 Jan 28 16:45:01 crc kubenswrapper[4877]: I0128 16:45:01.615465 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd" event={"ID":"a605e496-3f0b-4d5d-869e-f39742045553","Type":"ContainerDied","Data":"cb6d6e18484c430d6b4740a6e4ad872ab74d6e3be62cec894554db87c05e231d"} Jan 28 16:45:01 crc kubenswrapper[4877]: I0128 16:45:01.615618 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd" event={"ID":"a605e496-3f0b-4d5d-869e-f39742045553","Type":"ContainerStarted","Data":"43d002a2624b7c7ee1bc85d708a0914725fefe4cc4f56613c988e3561394a816"} Jan 28 16:45:02 crc kubenswrapper[4877]: I0128 16:45:02.937932 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd" Jan 28 16:45:03 crc kubenswrapper[4877]: I0128 16:45:03.038820 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a605e496-3f0b-4d5d-869e-f39742045553-secret-volume\") pod \"a605e496-3f0b-4d5d-869e-f39742045553\" (UID: \"a605e496-3f0b-4d5d-869e-f39742045553\") " Jan 28 16:45:03 crc kubenswrapper[4877]: I0128 16:45:03.039406 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-prb6d\" (UniqueName: \"kubernetes.io/projected/a605e496-3f0b-4d5d-869e-f39742045553-kube-api-access-prb6d\") pod \"a605e496-3f0b-4d5d-869e-f39742045553\" (UID: \"a605e496-3f0b-4d5d-869e-f39742045553\") " Jan 28 16:45:03 crc kubenswrapper[4877]: I0128 16:45:03.039632 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a605e496-3f0b-4d5d-869e-f39742045553-config-volume\") pod \"a605e496-3f0b-4d5d-869e-f39742045553\" (UID: \"a605e496-3f0b-4d5d-869e-f39742045553\") " Jan 28 16:45:03 crc kubenswrapper[4877]: I0128 16:45:03.041885 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a605e496-3f0b-4d5d-869e-f39742045553-config-volume" (OuterVolumeSpecName: "config-volume") pod "a605e496-3f0b-4d5d-869e-f39742045553" (UID: "a605e496-3f0b-4d5d-869e-f39742045553"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:45:03 crc kubenswrapper[4877]: I0128 16:45:03.049406 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a605e496-3f0b-4d5d-869e-f39742045553-kube-api-access-prb6d" (OuterVolumeSpecName: "kube-api-access-prb6d") pod "a605e496-3f0b-4d5d-869e-f39742045553" (UID: "a605e496-3f0b-4d5d-869e-f39742045553"). InnerVolumeSpecName "kube-api-access-prb6d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:45:03 crc kubenswrapper[4877]: I0128 16:45:03.049906 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a605e496-3f0b-4d5d-869e-f39742045553-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a605e496-3f0b-4d5d-869e-f39742045553" (UID: "a605e496-3f0b-4d5d-869e-f39742045553"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:45:03 crc kubenswrapper[4877]: I0128 16:45:03.142625 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-prb6d\" (UniqueName: \"kubernetes.io/projected/a605e496-3f0b-4d5d-869e-f39742045553-kube-api-access-prb6d\") on node \"crc\" DevicePath \"\"" Jan 28 16:45:03 crc kubenswrapper[4877]: I0128 16:45:03.142666 4877 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a605e496-3f0b-4d5d-869e-f39742045553-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 16:45:03 crc kubenswrapper[4877]: I0128 16:45:03.142676 4877 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a605e496-3f0b-4d5d-869e-f39742045553-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 16:45:03 crc kubenswrapper[4877]: I0128 16:45:03.635906 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd" event={"ID":"a605e496-3f0b-4d5d-869e-f39742045553","Type":"ContainerDied","Data":"43d002a2624b7c7ee1bc85d708a0914725fefe4cc4f56613c988e3561394a816"} Jan 28 16:45:03 crc kubenswrapper[4877]: I0128 16:45:03.635960 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="43d002a2624b7c7ee1bc85d708a0914725fefe4cc4f56613c988e3561394a816" Jan 28 16:45:03 crc kubenswrapper[4877]: I0128 16:45:03.636038 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd" Jan 28 16:45:07 crc kubenswrapper[4877]: I0128 16:45:07.077292 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:45:07 crc kubenswrapper[4877]: I0128 16:45:07.077881 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:45:07 crc kubenswrapper[4877]: I0128 16:45:07.077965 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:45:07 crc kubenswrapper[4877]: I0128 16:45:07.079523 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f87fd390161fe2cfa7f8b535b8c64bf410aed6149ff27de8dbf9c3d787641d32"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 16:45:07 crc kubenswrapper[4877]: I0128 16:45:07.079687 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" containerID="cri-o://f87fd390161fe2cfa7f8b535b8c64bf410aed6149ff27de8dbf9c3d787641d32" gracePeriod=600 Jan 28 16:45:07 crc kubenswrapper[4877]: I0128 16:45:07.670406 4877 generic.go:334] "Generic (PLEG): container finished" 
podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="f87fd390161fe2cfa7f8b535b8c64bf410aed6149ff27de8dbf9c3d787641d32" exitCode=0 Jan 28 16:45:07 crc kubenswrapper[4877]: I0128 16:45:07.670433 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"f87fd390161fe2cfa7f8b535b8c64bf410aed6149ff27de8dbf9c3d787641d32"} Jan 28 16:45:07 crc kubenswrapper[4877]: I0128 16:45:07.670915 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"009b4d91077431e907042d1b0b9c62d4fcd7c95df803c80a1b641a0306375a2c"} Jan 28 16:45:07 crc kubenswrapper[4877]: I0128 16:45:07.670942 4877 scope.go:117] "RemoveContainer" containerID="163e625204e85fa60aefc636260cc789258eff00206d927c91d05b2e7e892ef9" Jan 28 16:46:54 crc kubenswrapper[4877]: I0128 16:46:54.599900 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g"] Jan 28 16:46:54 crc kubenswrapper[4877]: E0128 16:46:54.602041 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a605e496-3f0b-4d5d-869e-f39742045553" containerName="collect-profiles" Jan 28 16:46:54 crc kubenswrapper[4877]: I0128 16:46:54.602091 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="a605e496-3f0b-4d5d-869e-f39742045553" containerName="collect-profiles" Jan 28 16:46:54 crc kubenswrapper[4877]: I0128 16:46:54.602271 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="a605e496-3f0b-4d5d-869e-f39742045553" containerName="collect-profiles" Jan 28 16:46:54 crc kubenswrapper[4877]: I0128 16:46:54.603538 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" Jan 28 16:46:54 crc kubenswrapper[4877]: I0128 16:46:54.606240 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 28 16:46:54 crc kubenswrapper[4877]: I0128 16:46:54.620138 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g"] Jan 28 16:46:54 crc kubenswrapper[4877]: I0128 16:46:54.647131 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g\" (UID: \"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" Jan 28 16:46:54 crc kubenswrapper[4877]: I0128 16:46:54.647202 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g\" (UID: \"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" Jan 28 16:46:54 crc kubenswrapper[4877]: I0128 16:46:54.647289 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmgm5\" (UniqueName: \"kubernetes.io/projected/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-kube-api-access-zmgm5\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g\" (UID: \"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" Jan 28 16:46:54 crc kubenswrapper[4877]: I0128 16:46:54.749102 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g\" (UID: \"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" Jan 28 16:46:54 crc kubenswrapper[4877]: I0128 16:46:54.749212 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g\" (UID: \"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" Jan 28 16:46:54 crc kubenswrapper[4877]: I0128 16:46:54.749258 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmgm5\" (UniqueName: \"kubernetes.io/projected/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-kube-api-access-zmgm5\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g\" (UID: \"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" Jan 28 16:46:54 crc kubenswrapper[4877]: I0128 16:46:54.749719 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g\" (UID: \"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" Jan 28 16:46:54 crc kubenswrapper[4877]: I0128 16:46:54.750244 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g\" (UID: \"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" Jan 28 16:46:54 crc kubenswrapper[4877]: I0128 16:46:54.775056 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmgm5\" (UniqueName: \"kubernetes.io/projected/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-kube-api-access-zmgm5\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g\" (UID: \"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" Jan 28 16:46:54 crc kubenswrapper[4877]: I0128 16:46:54.929672 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" Jan 28 16:46:55 crc kubenswrapper[4877]: I0128 16:46:55.180229 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g"] Jan 28 16:46:55 crc kubenswrapper[4877]: I0128 16:46:55.514049 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" event={"ID":"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1","Type":"ContainerStarted","Data":"5b16d4d1e370ebf29dbe30fc3f4713ecd6ac2b01fb248bee39342abe865f7d8e"} Jan 28 16:46:55 crc kubenswrapper[4877]: I0128 16:46:55.514117 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" event={"ID":"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1","Type":"ContainerStarted","Data":"7a4c9c94b97011a9bda9d54f18ff9ff0df91df17e63d92ab762522ff178ee3fd"} Jan 28 16:46:56 crc kubenswrapper[4877]: I0128 16:46:56.525125 4877 generic.go:334] "Generic (PLEG): container finished" podID="b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1" containerID="5b16d4d1e370ebf29dbe30fc3f4713ecd6ac2b01fb248bee39342abe865f7d8e" exitCode=0 Jan 28 16:46:56 crc kubenswrapper[4877]: I0128 16:46:56.525219 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" event={"ID":"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1","Type":"ContainerDied","Data":"5b16d4d1e370ebf29dbe30fc3f4713ecd6ac2b01fb248bee39342abe865f7d8e"} Jan 28 16:46:56 crc kubenswrapper[4877]: I0128 16:46:56.528425 4877 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 16:46:58 crc kubenswrapper[4877]: I0128 16:46:58.539886 4877 generic.go:334] "Generic (PLEG): container finished" podID="b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1" containerID="732d8ace421997d1c08845dce3f9f2de4769f8bca8062882f3edd810e2327302" exitCode=0 Jan 28 16:46:58 crc kubenswrapper[4877]: I0128 16:46:58.539969 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" event={"ID":"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1","Type":"ContainerDied","Data":"732d8ace421997d1c08845dce3f9f2de4769f8bca8062882f3edd810e2327302"} Jan 28 16:46:59 crc kubenswrapper[4877]: I0128 16:46:59.550516 4877 generic.go:334] "Generic (PLEG): container finished" podID="b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1" containerID="59b16cc873f9b81d6179c882941781285e0d5c3f850413795e9b081695320f2a" exitCode=0 Jan 28 16:46:59 crc kubenswrapper[4877]: I0128 16:46:59.550567 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" event={"ID":"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1","Type":"ContainerDied","Data":"59b16cc873f9b81d6179c882941781285e0d5c3f850413795e9b081695320f2a"} Jan 28 16:47:00 crc kubenswrapper[4877]: I0128 16:47:00.819626 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" Jan 28 16:47:00 crc kubenswrapper[4877]: I0128 16:47:00.853846 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-bundle\") pod \"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1\" (UID: \"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1\") " Jan 28 16:47:00 crc kubenswrapper[4877]: I0128 16:47:00.853934 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-util\") pod \"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1\" (UID: \"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1\") " Jan 28 16:47:00 crc kubenswrapper[4877]: I0128 16:47:00.854005 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmgm5\" (UniqueName: \"kubernetes.io/projected/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-kube-api-access-zmgm5\") pod \"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1\" (UID: \"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1\") " Jan 28 16:47:00 crc kubenswrapper[4877]: I0128 16:47:00.856553 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-bundle" (OuterVolumeSpecName: "bundle") pod "b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1" (UID: "b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:47:00 crc kubenswrapper[4877]: I0128 16:47:00.862053 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-kube-api-access-zmgm5" (OuterVolumeSpecName: "kube-api-access-zmgm5") pod "b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1" (UID: "b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1"). InnerVolumeSpecName "kube-api-access-zmgm5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:47:00 crc kubenswrapper[4877]: I0128 16:47:00.956816 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmgm5\" (UniqueName: \"kubernetes.io/projected/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-kube-api-access-zmgm5\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:00 crc kubenswrapper[4877]: I0128 16:47:00.956867 4877 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:01 crc kubenswrapper[4877]: I0128 16:47:01.031839 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-util" (OuterVolumeSpecName: "util") pod "b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1" (UID: "b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:47:01 crc kubenswrapper[4877]: I0128 16:47:01.058879 4877 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1-util\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:01 crc kubenswrapper[4877]: I0128 16:47:01.573727 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" event={"ID":"b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1","Type":"ContainerDied","Data":"7a4c9c94b97011a9bda9d54f18ff9ff0df91df17e63d92ab762522ff178ee3fd"} Jan 28 16:47:01 crc kubenswrapper[4877]: I0128 16:47:01.573814 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a4c9c94b97011a9bda9d54f18ff9ff0df91df17e63d92ab762522ff178ee3fd" Jan 28 16:47:01 crc kubenswrapper[4877]: I0128 16:47:01.573810 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08hkz7g" Jan 28 16:47:05 crc kubenswrapper[4877]: I0128 16:47:05.822431 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5gw27"] Jan 28 16:47:05 crc kubenswrapper[4877]: I0128 16:47:05.823586 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovn-controller" containerID="cri-o://b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa" gracePeriod=30 Jan 28 16:47:05 crc kubenswrapper[4877]: I0128 16:47:05.823690 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="nbdb" containerID="cri-o://d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f" gracePeriod=30 Jan 28 16:47:05 crc kubenswrapper[4877]: I0128 16:47:05.823763 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="kube-rbac-proxy-node" containerID="cri-o://741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d" gracePeriod=30 Jan 28 16:47:05 crc kubenswrapper[4877]: I0128 16:47:05.823831 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovn-acl-logging" containerID="cri-o://99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545" gracePeriod=30 Jan 28 16:47:05 crc kubenswrapper[4877]: I0128 16:47:05.823763 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="northd" containerID="cri-o://41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0" gracePeriod=30 Jan 28 16:47:05 crc kubenswrapper[4877]: I0128 16:47:05.823892 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a" gracePeriod=30 Jan 28 16:47:05 crc kubenswrapper[4877]: I0128 16:47:05.825775 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="sbdb" containerID="cri-o://801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683" gracePeriod=30 Jan 28 16:47:05 crc kubenswrapper[4877]: I0128 16:47:05.893409 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovnkube-controller" containerID="cri-o://7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e" gracePeriod=30 Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.619885 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hbxsq_2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a/kube-multus/2.log" Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.623645 4877 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-multus_multus-hbxsq_2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a/kube-multus/1.log" Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.623727 4877 generic.go:334] "Generic (PLEG): container finished" podID="2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a" containerID="1a806e67e9fc104f5c007ae476ce9c24b6f511eb3bfb6094c15c3872b5d991f7" exitCode=2 Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.623830 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hbxsq" event={"ID":"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a","Type":"ContainerDied","Data":"1a806e67e9fc104f5c007ae476ce9c24b6f511eb3bfb6094c15c3872b5d991f7"} Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.623934 4877 scope.go:117] "RemoveContainer" containerID="36b4d387fb9ff0faec293faf540dca479867d1b5f5df34cdfad39d1e62348033" Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.624896 4877 scope.go:117] "RemoveContainer" containerID="1a806e67e9fc104f5c007ae476ce9c24b6f511eb3bfb6094c15c3872b5d991f7" Jan 28 16:47:06 crc kubenswrapper[4877]: E0128 16:47:06.625237 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-hbxsq_openshift-multus(2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a)\"" pod="openshift-multus/multus-hbxsq" podUID="2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a" Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.639315 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovnkube-controller/3.log" Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.654886 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovn-acl-logging/0.log" Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.655579 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovn-controller/0.log" Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.656080 4877 generic.go:334] "Generic (PLEG): container finished" podID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerID="7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e" exitCode=0 Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.656113 4877 generic.go:334] "Generic (PLEG): container finished" podID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerID="801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683" exitCode=0 Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.656121 4877 generic.go:334] "Generic (PLEG): container finished" podID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerID="d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f" exitCode=0 Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.656131 4877 generic.go:334] "Generic (PLEG): container finished" podID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerID="41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0" exitCode=0 Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.656158 4877 generic.go:334] "Generic (PLEG): container finished" podID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerID="99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545" exitCode=143 Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.656170 4877 generic.go:334] "Generic (PLEG): container finished" 
podID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerID="b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa" exitCode=143 Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.656173 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerDied","Data":"7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e"} Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.656247 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerDied","Data":"801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683"} Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.656260 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerDied","Data":"d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f"} Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.656271 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerDied","Data":"41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0"} Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.656288 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerDied","Data":"99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545"} Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.656298 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerDied","Data":"b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa"} Jan 28 16:47:06 crc kubenswrapper[4877]: I0128 16:47:06.677027 4877 scope.go:117] "RemoveContainer" containerID="ea898e1c900189816c5818f182ae3d2e34b4c6655591f4f30d6a70d60349dc7f" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.076622 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.076718 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.530467 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovn-acl-logging/0.log" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.531142 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovn-controller/0.log" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.531571 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.572525 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovnkube-config\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.572628 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-systemd-units\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.572658 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-systemd\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.572748 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-var-lib-cni-networks-ovn-kubernetes\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.572765 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-slash\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.572796 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-cni-bin\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.572820 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovn-node-metrics-cert\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.572802 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "systemd-units". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.572843 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-env-overrides\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.572930 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-ovn\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.572990 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-openvswitch\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573046 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-var-lib-openvswitch\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573056 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573102 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-log-socket" (OuterVolumeSpecName: "log-socket") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573076 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-log-socket\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573127 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-slash" (OuterVolumeSpecName: "host-slash") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573146 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-etc-openvswitch\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573175 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-run-netns\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573207 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-node-log\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573275 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gdz8t\" (UniqueName: \"kubernetes.io/projected/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-kube-api-access-gdz8t\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573292 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573299 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-kubelet\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573370 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovnkube-script-lib\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573399 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-run-ovn-kubernetes\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573419 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-cni-netd\") pod \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\" (UID: \"3138aa2e-dca5-4d62-aa47-1fd2b559baaf\") " Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573324 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573341 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573359 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573383 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "var-lib-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573401 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573427 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573444 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-node-log" (OuterVolumeSpecName: "node-log") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573586 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.573748 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.574069 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.574109 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "host-cni-netd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.574373 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.574864 4877 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.574887 4877 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-log-socket\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.574901 4877 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.574915 4877 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.574927 4877 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-node-log\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.574939 4877 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.574952 4877 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.574968 4877 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.574983 4877 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.574994 4877 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.575005 4877 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.575019 4877 
reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.575034 4877 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-slash\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.575047 4877 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.575062 4877 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.575077 4877 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.575090 4877 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.584840 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-kube-api-access-gdz8t" (OuterVolumeSpecName: "kube-api-access-gdz8t") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "kube-api-access-gdz8t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.585915 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.604629 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "3138aa2e-dca5-4d62-aa47-1fd2b559baaf" (UID: "3138aa2e-dca5-4d62-aa47-1fd2b559baaf"). InnerVolumeSpecName "run-systemd". 
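The block above is kubelet's two-phase volume teardown for the terminated ovnkube-node pod: the reconciler starts UnmountVolume per volume, operation_generator reports TearDown succeeded, and only then is each volume reported detached from the node. A minimal Go sketch of that loop follows; the types and function names are illustrative assumptions, not kubelet's actual API.

package main

import "fmt"

// volumeState is a hypothetical stand-in for kubelet's actual-state-of-world entry.
type volumeState struct {
	name    string
	plugin  string
	mounted bool
}

// reconcileUnmounts sketches the two steps visible in the log: tear down
// mounts that are no longer desired, then mark the volume detached.
func reconcileUnmounts(actual map[string]*volumeState, desired map[string]bool) {
	for name, vs := range actual {
		if desired[name] {
			continue // volume still wanted by a pod on this node
		}
		if vs.mounted {
			// Step 1: operationExecutor.UnmountVolume -> TearDown succeeded.
			vs.mounted = false
			fmt.Printf("UnmountVolume.TearDown succeeded for volume %q (plugin %s)\n", name, vs.plugin)
		} else {
			// Step 2: already unmounted, so report it detached and forget it.
			fmt.Printf("Volume detached for volume %q\n", name)
			delete(actual, name)
		}
	}
}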
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642072 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-8slsx"] Jan 28 16:47:07 crc kubenswrapper[4877]: E0128 16:47:07.642429 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="kube-rbac-proxy-ovn-metrics" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642452 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="kube-rbac-proxy-ovn-metrics" Jan 28 16:47:07 crc kubenswrapper[4877]: E0128 16:47:07.642465 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovnkube-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642555 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovnkube-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: E0128 16:47:07.642568 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1" containerName="util" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642576 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1" containerName="util" Jan 28 16:47:07 crc kubenswrapper[4877]: E0128 16:47:07.642588 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovn-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642594 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovn-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: E0128 16:47:07.642605 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="nbdb" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642611 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="nbdb" Jan 28 16:47:07 crc kubenswrapper[4877]: E0128 16:47:07.642619 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="northd" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642626 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="northd" Jan 28 16:47:07 crc kubenswrapper[4877]: E0128 16:47:07.642638 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="kubecfg-setup" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642652 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="kubecfg-setup" Jan 28 16:47:07 crc kubenswrapper[4877]: E0128 16:47:07.642662 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="kube-rbac-proxy-node" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642668 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="kube-rbac-proxy-node" Jan 28 16:47:07 crc kubenswrapper[4877]: E0128 16:47:07.642677 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" 
containerName="ovnkube-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642682 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovnkube-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: E0128 16:47:07.642690 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovnkube-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642696 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovnkube-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: E0128 16:47:07.642702 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1" containerName="pull" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642708 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1" containerName="pull" Jan 28 16:47:07 crc kubenswrapper[4877]: E0128 16:47:07.642715 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1" containerName="extract" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642722 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1" containerName="extract" Jan 28 16:47:07 crc kubenswrapper[4877]: E0128 16:47:07.642730 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovnkube-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642735 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovnkube-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: E0128 16:47:07.642746 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovn-acl-logging" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642752 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovn-acl-logging" Jan 28 16:47:07 crc kubenswrapper[4877]: E0128 16:47:07.642762 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="sbdb" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642774 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="sbdb" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642919 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovnkube-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642936 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovnkube-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642944 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovn-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642954 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="kube-rbac-proxy-ovn-metrics" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642962 4877 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovn-acl-logging" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642971 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="kube-rbac-proxy-node" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642978 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="northd" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642985 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovnkube-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642992 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="nbdb" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.642998 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="sbdb" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.643004 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6a5a91e-acfb-461c-b5ce-7030c7ebb0c1" containerName="extract" Jan 28 16:47:07 crc kubenswrapper[4877]: E0128 16:47:07.643148 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovnkube-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.643155 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovnkube-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.643275 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovnkube-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.643286 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerName="ovnkube-controller" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.650253 4877 util.go:30] "No sandbox for pod can be found. 
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.681394 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gdz8t\" (UniqueName: \"kubernetes.io/projected/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-kube-api-access-gdz8t\") on node \"crc\" DevicePath \"\""
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.681442 4877 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-run-systemd\") on node \"crc\" DevicePath \"\""
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.681455 4877 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3138aa2e-dca5-4d62-aa47-1fd2b559baaf-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.733539 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hbxsq_2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a/kube-multus/2.log"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782537 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-run-ovn-kubernetes\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782598 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c26f4211-6c76-421f-add4-6d58385c9219-ovn-node-metrics-cert\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782629 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-cni-netd\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782649 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c26f4211-6c76-421f-add4-6d58385c9219-ovnkube-config\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782674 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-run-openvswitch\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782694 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-run-ovn\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782709 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-systemd-units\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782729 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-slash\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782744 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-node-log\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782758 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-run-systemd\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782773 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c26f4211-6c76-421f-add4-6d58385c9219-ovnkube-script-lib\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782790 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-run-netns\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782811 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782832 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-etc-openvswitch\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782858 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4slkj\" (UniqueName: \"kubernetes.io/projected/c26f4211-6c76-421f-add4-6d58385c9219-kube-api-access-4slkj\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782875 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-var-lib-openvswitch\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782890 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-log-socket\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782912 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c26f4211-6c76-421f-add4-6d58385c9219-env-overrides\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782932 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-kubelet\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.782954 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-cni-bin\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.790917 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovn-acl-logging/0.log"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.791663 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5gw27_3138aa2e-dca5-4d62-aa47-1fd2b559baaf/ovn-controller/0.log"
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.792327 4877 generic.go:334] "Generic (PLEG): container finished" podID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerID="08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a" exitCode=0
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.792412 4877 generic.go:334] "Generic (PLEG): container finished" podID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" containerID="741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d" exitCode=0
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.792548 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerDied","Data":"08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a"}
Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.792666 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerDied","Data":"741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d"}
pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerDied","Data":"741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d"} Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.792740 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" event={"ID":"3138aa2e-dca5-4d62-aa47-1fd2b559baaf","Type":"ContainerDied","Data":"7230247f1466a5475d643ecd972c2b5247a82a76f574c3c9e36a7230af1d6b1b"} Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.792812 4877 scope.go:117] "RemoveContainer" containerID="7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.793079 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5gw27" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.827615 4877 scope.go:117] "RemoveContainer" containerID="801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.860739 4877 scope.go:117] "RemoveContainer" containerID="d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.874130 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5gw27"] Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887236 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-cni-bin\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887311 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-run-ovn-kubernetes\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887341 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c26f4211-6c76-421f-add4-6d58385c9219-ovn-node-metrics-cert\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887370 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-cni-netd\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887387 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c26f4211-6c76-421f-add4-6d58385c9219-ovnkube-config\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887412 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-run-openvswitch\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887431 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-run-ovn\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887451 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-systemd-units\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887487 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-slash\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887507 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-node-log\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887525 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-run-systemd\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887544 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c26f4211-6c76-421f-add4-6d58385c9219-ovnkube-script-lib\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887647 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-run-netns\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887705 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887733 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-etc-openvswitch\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887792 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4slkj\" (UniqueName: \"kubernetes.io/projected/c26f4211-6c76-421f-add4-6d58385c9219-kube-api-access-4slkj\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887817 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-var-lib-openvswitch\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887838 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-log-socket\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887871 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c26f4211-6c76-421f-add4-6d58385c9219-env-overrides\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887897 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-kubelet\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.887992 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-kubelet\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.888036 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-cni-bin\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.888059 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-run-ovn-kubernetes\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.888448 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-run-systemd\") pod \"ovnkube-node-8slsx\" (UID: 
\"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.888553 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-cni-netd\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.889324 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c26f4211-6c76-421f-add4-6d58385c9219-ovnkube-config\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.889385 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-run-openvswitch\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.889425 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-run-ovn\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.889567 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-run-netns\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.889642 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-etc-openvswitch\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.889747 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.889796 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-host-slash\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.889833 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-var-lib-openvswitch\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc 
kubenswrapper[4877]: I0128 16:47:07.890183 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c26f4211-6c76-421f-add4-6d58385c9219-ovnkube-script-lib\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.890239 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-node-log\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.890252 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-log-socket\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.890710 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c26f4211-6c76-421f-add4-6d58385c9219-env-overrides\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.889458 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c26f4211-6c76-421f-add4-6d58385c9219-systemd-units\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.896798 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c26f4211-6c76-421f-add4-6d58385c9219-ovn-node-metrics-cert\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.901902 4877 scope.go:117] "RemoveContainer" containerID="41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.912640 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5gw27"] Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.938656 4877 scope.go:117] "RemoveContainer" containerID="08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.955246 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4slkj\" (UniqueName: \"kubernetes.io/projected/c26f4211-6c76-421f-add4-6d58385c9219-kube-api-access-4slkj\") pod \"ovnkube-node-8slsx\" (UID: \"c26f4211-6c76-421f-add4-6d58385c9219\") " pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:07 crc kubenswrapper[4877]: I0128 16:47:07.983909 4877 util.go:30] "No sandbox for pod can be found. 
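Bringing up the replacement ovnkube-node-8slsx pod mirrors the teardown: VerifyControllerAttachedVolume runs first, then MountVolume starts per volume and operation_generator reports each SetUp success; the pod can only proceed once every volume has mounted. A condensed sketch of that gating, with assumed names (setUp stands in for the per-plugin SetUp call):

package main

import "fmt"

// mountAll sketches the startup gate visible in the log: every volume must
// report MountVolume.SetUp succeeded before the sandbox is started.
func mountAll(volumes []string, setUp func(string) error) error {
	for _, v := range volumes {
		// Attachable volumes would first pass VerifyControllerAttachedVolume;
		// host-path, configmap, secret, and projected volumes go straight to SetUp.
		if err := setUp(v); err != nil {
			return fmt.Errorf("MountVolume.SetUp failed for volume %q: %w", v, err)
		}
		fmt.Printf("MountVolume.SetUp succeeded for volume %q\n", v)
	}
	return nil
}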
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.028002 4877 scope.go:117] "RemoveContainer" containerID="741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.115254 4877 scope.go:117] "RemoveContainer" containerID="99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.142764 4877 scope.go:117] "RemoveContainer" containerID="b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.172699 4877 scope.go:117] "RemoveContainer" containerID="dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.204955 4877 scope.go:117] "RemoveContainer" containerID="7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e"
Jan 28 16:47:08 crc kubenswrapper[4877]: E0128 16:47:08.205699 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e\": container with ID starting with 7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e not found: ID does not exist" containerID="7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.205761 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e"} err="failed to get container status \"7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e\": rpc error: code = NotFound desc = could not find container \"7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e\": container with ID starting with 7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e not found: ID does not exist"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.205802 4877 scope.go:117] "RemoveContainer" containerID="801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683"
Jan 28 16:47:08 crc kubenswrapper[4877]: E0128 16:47:08.206164 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\": container with ID starting with 801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683 not found: ID does not exist" containerID="801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.206194 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683"} err="failed to get container status \"801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\": rpc error: code = NotFound desc = could not find container \"801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\": container with ID starting with 801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683 not found: ID does not exist"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.206216 4877 scope.go:117] "RemoveContainer" containerID="d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f"
Jan 28 16:47:08 crc kubenswrapper[4877]: E0128 16:47:08.206681 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\": container with ID starting with d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f not found: ID does not exist" containerID="d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.206719 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f"} err="failed to get container status \"d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\": rpc error: code = NotFound desc = could not find container \"d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\": container with ID starting with d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f not found: ID does not exist"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.206745 4877 scope.go:117] "RemoveContainer" containerID="41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0"
Jan 28 16:47:08 crc kubenswrapper[4877]: E0128 16:47:08.207136 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\": container with ID starting with 41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0 not found: ID does not exist" containerID="41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.207206 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0"} err="failed to get container status \"41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\": rpc error: code = NotFound desc = could not find container \"41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\": container with ID starting with 41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0 not found: ID does not exist"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.207239 4877 scope.go:117] "RemoveContainer" containerID="08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a"
Jan 28 16:47:08 crc kubenswrapper[4877]: E0128 16:47:08.207631 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\": container with ID starting with 08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a not found: ID does not exist" containerID="08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.207658 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a"} err="failed to get container status \"08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\": rpc error: code = NotFound desc = could not find container \"08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\": container with ID starting with 08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a not found: ID does not exist"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.207677 4877 scope.go:117] "RemoveContainer" containerID="741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d"
Jan 28 16:47:08 crc kubenswrapper[4877]: E0128 16:47:08.207905 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\": container with ID starting with 741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d not found: ID does not exist" containerID="741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.207925 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d"} err="failed to get container status \"741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\": rpc error: code = NotFound desc = could not find container \"741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\": container with ID starting with 741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d not found: ID does not exist"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.207939 4877 scope.go:117] "RemoveContainer" containerID="99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545"
Jan 28 16:47:08 crc kubenswrapper[4877]: E0128 16:47:08.208148 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\": container with ID starting with 99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545 not found: ID does not exist" containerID="99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.208169 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545"} err="failed to get container status \"99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\": rpc error: code = NotFound desc = could not find container \"99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\": container with ID starting with 99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545 not found: ID does not exist"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.208184 4877 scope.go:117] "RemoveContainer" containerID="b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa"
Jan 28 16:47:08 crc kubenswrapper[4877]: E0128 16:47:08.208389 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\": container with ID starting with b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa not found: ID does not exist" containerID="b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa"
Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.208409 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa"} err="failed to get container status \"b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\": rpc error: code = NotFound desc = could not find container \"b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\": container with ID starting with b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa not found: ID does not exist"
b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa not found: ID does not exist" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.208420 4877 scope.go:117] "RemoveContainer" containerID="dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280" Jan 28 16:47:08 crc kubenswrapper[4877]: E0128 16:47:08.208624 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\": container with ID starting with dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280 not found: ID does not exist" containerID="dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.208683 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280"} err="failed to get container status \"dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\": rpc error: code = NotFound desc = could not find container \"dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\": container with ID starting with dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280 not found: ID does not exist" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.208697 4877 scope.go:117] "RemoveContainer" containerID="7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.209559 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e"} err="failed to get container status \"7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e\": rpc error: code = NotFound desc = could not find container \"7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e\": container with ID starting with 7dfd300cb0b1834da026cd13b1644de7cbe59ec8f72487eb1e4abb57bd4c4c8e not found: ID does not exist" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.209584 4877 scope.go:117] "RemoveContainer" containerID="801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.209842 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683"} err="failed to get container status \"801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\": rpc error: code = NotFound desc = could not find container \"801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683\": container with ID starting with 801a131f9b275618b61beb183b11269ec285575dd065a7336283d593e1787683 not found: ID does not exist" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.209865 4877 scope.go:117] "RemoveContainer" containerID="d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.210561 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f"} err="failed to get container status \"d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\": rpc error: code = NotFound desc = could not find container \"d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f\": container with ID starting with 
d80045fc38c009046182169fd6ac005f5d397121673aeda94eaddb43a0b6727f not found: ID does not exist" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.210583 4877 scope.go:117] "RemoveContainer" containerID="41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.210826 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0"} err="failed to get container status \"41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\": rpc error: code = NotFound desc = could not find container \"41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0\": container with ID starting with 41f18b489755a4e0789dca55e7e49d9932f4ff888b584292400c2b3cf6438dd0 not found: ID does not exist" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.210844 4877 scope.go:117] "RemoveContainer" containerID="08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.211050 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a"} err="failed to get container status \"08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\": rpc error: code = NotFound desc = could not find container \"08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a\": container with ID starting with 08f11b6dcb67da73b1715fb78025605ab7f773661e352f755c078d6155766c3a not found: ID does not exist" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.211073 4877 scope.go:117] "RemoveContainer" containerID="741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.211293 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d"} err="failed to get container status \"741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\": rpc error: code = NotFound desc = could not find container \"741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d\": container with ID starting with 741b755b10ed5e24f892f67dadd2c879fb4a1b9998fed11484a79acda5a08d2d not found: ID does not exist" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.211312 4877 scope.go:117] "RemoveContainer" containerID="99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.211557 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545"} err="failed to get container status \"99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\": rpc error: code = NotFound desc = could not find container \"99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545\": container with ID starting with 99c6f7179c77cd3cb8f7384f2c109f924d9b07861db26b701dd378313409a545 not found: ID does not exist" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.211577 4877 scope.go:117] "RemoveContainer" containerID="b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.211889 4877 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa"} err="failed to get container status \"b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\": rpc error: code = NotFound desc = could not find container \"b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa\": container with ID starting with b6291fb5294f4a6f0125bd4e4e52f3d5999be8610e868ed6d78e75959fc9b4aa not found: ID does not exist" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.211925 4877 scope.go:117] "RemoveContainer" containerID="dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.212135 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280"} err="failed to get container status \"dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\": rpc error: code = NotFound desc = could not find container \"dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280\": container with ID starting with dcc2419ad8804dfc900909885d2902ccadba868ab20dcd83c64b1ce89800e280 not found: ID does not exist" Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.802071 4877 generic.go:334] "Generic (PLEG): container finished" podID="c26f4211-6c76-421f-add4-6d58385c9219" containerID="7a2322ccf7bd711711efe2716ec7704ae6a71488c662c1b0f38c43a213249ac1" exitCode=0 Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.802405 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" event={"ID":"c26f4211-6c76-421f-add4-6d58385c9219","Type":"ContainerDied","Data":"7a2322ccf7bd711711efe2716ec7704ae6a71488c662c1b0f38c43a213249ac1"} Jan 28 16:47:08 crc kubenswrapper[4877]: I0128 16:47:08.802433 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" event={"ID":"c26f4211-6c76-421f-add4-6d58385c9219","Type":"ContainerStarted","Data":"2ee378138beb92c0f5b56254350aeee1cbba25133d765c6ee4d533d7aa4f4e98"} Jan 28 16:47:09 crc kubenswrapper[4877]: I0128 16:47:09.339263 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3138aa2e-dca5-4d62-aa47-1fd2b559baaf" path="/var/lib/kubelet/pods/3138aa2e-dca5-4d62-aa47-1fd2b559baaf/volumes" Jan 28 16:47:09 crc kubenswrapper[4877]: I0128 16:47:09.812389 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" event={"ID":"c26f4211-6c76-421f-add4-6d58385c9219","Type":"ContainerStarted","Data":"de36d8ece05e571a4bddfcf582cef4bdecd0a71c117f9a8f92e06480e6fd1909"} Jan 28 16:47:09 crc kubenswrapper[4877]: I0128 16:47:09.812811 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" event={"ID":"c26f4211-6c76-421f-add4-6d58385c9219","Type":"ContainerStarted","Data":"c5549e699250c479848ac078a8e2de9102fafc76871171b93a79411a1a174881"} Jan 28 16:47:09 crc kubenswrapper[4877]: I0128 16:47:09.812827 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" event={"ID":"c26f4211-6c76-421f-add4-6d58385c9219","Type":"ContainerStarted","Data":"a1f2550f327cc4296df786069999fb8a2f1e1caacc5e61f8c5cbd15a45d566b8"} Jan 28 16:47:09 crc kubenswrapper[4877]: I0128 16:47:09.812836 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" 
event={"ID":"c26f4211-6c76-421f-add4-6d58385c9219","Type":"ContainerStarted","Data":"55b7f860b5594a849d3a0c1e8ee87b16f44f0510acb3fa68d7742e3f72e68bdc"} Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.343067 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf"] Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.344556 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.347862 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-f2ld2" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.349913 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.350117 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.433288 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj"] Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.434437 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.442700 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-zk99q" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.442789 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.449108 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54"] Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.450155 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.473798 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbzcv\" (UniqueName: \"kubernetes.io/projected/041bbf13-d52e-4aae-9750-b20b0e9558cb-kube-api-access-jbzcv\") pod \"obo-prometheus-operator-68bc856cb9-c6nnf\" (UID: \"041bbf13-d52e-4aae-9750-b20b0e9558cb\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.569460 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-2cprx"] Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.571610 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.575425 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj\" (UID: \"2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.575509 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2e3baef9-6c5f-425f-8a26-a790b5e2d1e8-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58c86d7846-krh54\" (UID: \"2e3baef9-6c5f-425f-8a26-a790b5e2d1e8\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.575587 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbzcv\" (UniqueName: \"kubernetes.io/projected/041bbf13-d52e-4aae-9750-b20b0e9558cb-kube-api-access-jbzcv\") pod \"obo-prometheus-operator-68bc856cb9-c6nnf\" (UID: \"041bbf13-d52e-4aae-9750-b20b0e9558cb\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.575623 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2e3baef9-6c5f-425f-8a26-a790b5e2d1e8-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58c86d7846-krh54\" (UID: \"2e3baef9-6c5f-425f-8a26-a790b5e2d1e8\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.575641 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj\" (UID: \"2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.579798 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.579919 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-kz272" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.603437 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbzcv\" (UniqueName: \"kubernetes.io/projected/041bbf13-d52e-4aae-9750-b20b0e9558cb-kube-api-access-jbzcv\") pod \"obo-prometheus-operator-68bc856cb9-c6nnf\" (UID: \"041bbf13-d52e-4aae-9750-b20b0e9558cb\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.677841 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2e3baef9-6c5f-425f-8a26-a790b5e2d1e8-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58c86d7846-krh54\" (UID: 
\"2e3baef9-6c5f-425f-8a26-a790b5e2d1e8\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.677913 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj\" (UID: \"2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.678012 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/38cddd18-356a-4be1-8e45-b908361805bf-observability-operator-tls\") pod \"observability-operator-59bdc8b94-2cprx\" (UID: \"38cddd18-356a-4be1-8e45-b908361805bf\") " pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.678056 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj\" (UID: \"2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.678116 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2e3baef9-6c5f-425f-8a26-a790b5e2d1e8-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58c86d7846-krh54\" (UID: \"2e3baef9-6c5f-425f-8a26-a790b5e2d1e8\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.678205 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dbfk\" (UniqueName: \"kubernetes.io/projected/38cddd18-356a-4be1-8e45-b908361805bf-kube-api-access-2dbfk\") pod \"observability-operator-59bdc8b94-2cprx\" (UID: \"38cddd18-356a-4be1-8e45-b908361805bf\") " pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.682701 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj\" (UID: \"2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.685104 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj\" (UID: \"2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.685674 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2e3baef9-6c5f-425f-8a26-a790b5e2d1e8-apiservice-cert\") pod 
\"obo-prometheus-operator-admission-webhook-58c86d7846-krh54\" (UID: \"2e3baef9-6c5f-425f-8a26-a790b5e2d1e8\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.692825 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-mzftl"] Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.692987 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.700191 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2e3baef9-6c5f-425f-8a26-a790b5e2d1e8-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58c86d7846-krh54\" (UID: \"2e3baef9-6c5f-425f-8a26-a790b5e2d1e8\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.712858 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.718838 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-6c946" Jan 28 16:47:10 crc kubenswrapper[4877]: E0128 16:47:10.755578 4877 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-c6nnf_openshift-operators_041bbf13-d52e-4aae-9750-b20b0e9558cb_0(86e70559b4bae3aaeaa507ec5854a0ebd67897c07072e1f1df852cd190cde5a4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 16:47:10 crc kubenswrapper[4877]: E0128 16:47:10.755749 4877 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-c6nnf_openshift-operators_041bbf13-d52e-4aae-9750-b20b0e9558cb_0(86e70559b4bae3aaeaa507ec5854a0ebd67897c07072e1f1df852cd190cde5a4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" Jan 28 16:47:10 crc kubenswrapper[4877]: E0128 16:47:10.755815 4877 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-c6nnf_openshift-operators_041bbf13-d52e-4aae-9750-b20b0e9558cb_0(86e70559b4bae3aaeaa507ec5854a0ebd67897c07072e1f1df852cd190cde5a4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" Jan 28 16:47:10 crc kubenswrapper[4877]: E0128 16:47:10.755954 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-68bc856cb9-c6nnf_openshift-operators(041bbf13-d52e-4aae-9750-b20b0e9558cb)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-68bc856cb9-c6nnf_openshift-operators(041bbf13-d52e-4aae-9750-b20b0e9558cb)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-c6nnf_openshift-operators_041bbf13-d52e-4aae-9750-b20b0e9558cb_0(86e70559b4bae3aaeaa507ec5854a0ebd67897c07072e1f1df852cd190cde5a4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" podUID="041bbf13-d52e-4aae-9750-b20b0e9558cb" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.756264 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.771864 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.779769 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/38cddd18-356a-4be1-8e45-b908361805bf-observability-operator-tls\") pod \"observability-operator-59bdc8b94-2cprx\" (UID: \"38cddd18-356a-4be1-8e45-b908361805bf\") " pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.779880 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dbfk\" (UniqueName: \"kubernetes.io/projected/38cddd18-356a-4be1-8e45-b908361805bf-kube-api-access-2dbfk\") pod \"observability-operator-59bdc8b94-2cprx\" (UID: \"38cddd18-356a-4be1-8e45-b908361805bf\") " pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.783250 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/38cddd18-356a-4be1-8e45-b908361805bf-observability-operator-tls\") pod \"observability-operator-59bdc8b94-2cprx\" (UID: \"38cddd18-356a-4be1-8e45-b908361805bf\") " pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.803443 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dbfk\" (UniqueName: \"kubernetes.io/projected/38cddd18-356a-4be1-8e45-b908361805bf-kube-api-access-2dbfk\") pod \"observability-operator-59bdc8b94-2cprx\" (UID: \"38cddd18-356a-4be1-8e45-b908361805bf\") " pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:10 crc kubenswrapper[4877]: E0128 16:47:10.815262 4877 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj_openshift-operators_2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed_0(38dbd51232dac3909f8ae0370019ed4a78471fdc058700560fd1577daf8b5656): no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 16:47:10 crc kubenswrapper[4877]: E0128 16:47:10.815489 4877 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj_openshift-operators_2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed_0(38dbd51232dac3909f8ae0370019ed4a78471fdc058700560fd1577daf8b5656): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" Jan 28 16:47:10 crc kubenswrapper[4877]: E0128 16:47:10.815571 4877 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj_openshift-operators_2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed_0(38dbd51232dac3909f8ae0370019ed4a78471fdc058700560fd1577daf8b5656): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" Jan 28 16:47:10 crc kubenswrapper[4877]: E0128 16:47:10.815669 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj_openshift-operators(2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj_openshift-operators(2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj_openshift-operators_2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed_0(38dbd51232dac3909f8ae0370019ed4a78471fdc058700560fd1577daf8b5656): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" podUID="2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed" Jan 28 16:47:10 crc kubenswrapper[4877]: E0128 16:47:10.822078 4877 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58c86d7846-krh54_openshift-operators_2e3baef9-6c5f-425f-8a26-a790b5e2d1e8_0(055dc05969fc479fe1269cc46ff16c5867556893686bf6ec7b4dfd45d3cf4d8f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 16:47:10 crc kubenswrapper[4877]: E0128 16:47:10.822183 4877 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58c86d7846-krh54_openshift-operators_2e3baef9-6c5f-425f-8a26-a790b5e2d1e8_0(055dc05969fc479fe1269cc46ff16c5867556893686bf6ec7b4dfd45d3cf4d8f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" Jan 28 16:47:10 crc kubenswrapper[4877]: E0128 16:47:10.822257 4877 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58c86d7846-krh54_openshift-operators_2e3baef9-6c5f-425f-8a26-a790b5e2d1e8_0(055dc05969fc479fe1269cc46ff16c5867556893686bf6ec7b4dfd45d3cf4d8f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" Jan 28 16:47:10 crc kubenswrapper[4877]: E0128 16:47:10.822341 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-58c86d7846-krh54_openshift-operators(2e3baef9-6c5f-425f-8a26-a790b5e2d1e8)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-58c86d7846-krh54_openshift-operators(2e3baef9-6c5f-425f-8a26-a790b5e2d1e8)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58c86d7846-krh54_openshift-operators_2e3baef9-6c5f-425f-8a26-a790b5e2d1e8_0(055dc05969fc479fe1269cc46ff16c5867556893686bf6ec7b4dfd45d3cf4d8f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" podUID="2e3baef9-6c5f-425f-8a26-a790b5e2d1e8" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.827828 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" event={"ID":"c26f4211-6c76-421f-add4-6d58385c9219","Type":"ContainerStarted","Data":"cfb3b1dd5f3972ab55e93a4700ded8fe054c0cc527ffd1a1c8fe65a6ebacbad8"} Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.828025 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" event={"ID":"c26f4211-6c76-421f-add4-6d58385c9219","Type":"ContainerStarted","Data":"1909366a6a1c4a87a0aa080df76f964b3089ff5a09430dbe83b6bd4300819466"} Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.881460 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/d18fe762-0cf5-444e-a4c0-28e812f435fa-openshift-service-ca\") pod \"perses-operator-5bf474d74f-mzftl\" (UID: \"d18fe762-0cf5-444e-a4c0-28e812f435fa\") " pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.881894 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xftb\" (UniqueName: \"kubernetes.io/projected/d18fe762-0cf5-444e-a4c0-28e812f435fa-kube-api-access-6xftb\") pod \"perses-operator-5bf474d74f-mzftl\" (UID: \"d18fe762-0cf5-444e-a4c0-28e812f435fa\") " pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.888390 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:10 crc kubenswrapper[4877]: E0128 16:47:10.921728 4877 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-2cprx_openshift-operators_38cddd18-356a-4be1-8e45-b908361805bf_0(fce5c903e444a1f6c6b3dbc7602697f8a314e00edd4114a7c3a43b1fe283e9ff): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 16:47:10 crc kubenswrapper[4877]: E0128 16:47:10.921814 4877 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-2cprx_openshift-operators_38cddd18-356a-4be1-8e45-b908361805bf_0(fce5c903e444a1f6c6b3dbc7602697f8a314e00edd4114a7c3a43b1fe283e9ff): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:10 crc kubenswrapper[4877]: E0128 16:47:10.921841 4877 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-2cprx_openshift-operators_38cddd18-356a-4be1-8e45-b908361805bf_0(fce5c903e444a1f6c6b3dbc7602697f8a314e00edd4114a7c3a43b1fe283e9ff): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:10 crc kubenswrapper[4877]: E0128 16:47:10.921891 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-59bdc8b94-2cprx_openshift-operators(38cddd18-356a-4be1-8e45-b908361805bf)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-59bdc8b94-2cprx_openshift-operators(38cddd18-356a-4be1-8e45-b908361805bf)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-2cprx_openshift-operators_38cddd18-356a-4be1-8e45-b908361805bf_0(fce5c903e444a1f6c6b3dbc7602697f8a314e00edd4114a7c3a43b1fe283e9ff): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" podUID="38cddd18-356a-4be1-8e45-b908361805bf" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.984103 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/d18fe762-0cf5-444e-a4c0-28e812f435fa-openshift-service-ca\") pod \"perses-operator-5bf474d74f-mzftl\" (UID: \"d18fe762-0cf5-444e-a4c0-28e812f435fa\") " pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.984522 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xftb\" (UniqueName: \"kubernetes.io/projected/d18fe762-0cf5-444e-a4c0-28e812f435fa-kube-api-access-6xftb\") pod \"perses-operator-5bf474d74f-mzftl\" (UID: \"d18fe762-0cf5-444e-a4c0-28e812f435fa\") " pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:10 crc kubenswrapper[4877]: I0128 16:47:10.985240 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/d18fe762-0cf5-444e-a4c0-28e812f435fa-openshift-service-ca\") pod \"perses-operator-5bf474d74f-mzftl\" (UID: \"d18fe762-0cf5-444e-a4c0-28e812f435fa\") " pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:11 crc kubenswrapper[4877]: I0128 16:47:11.003756 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xftb\" (UniqueName: \"kubernetes.io/projected/d18fe762-0cf5-444e-a4c0-28e812f435fa-kube-api-access-6xftb\") pod \"perses-operator-5bf474d74f-mzftl\" (UID: \"d18fe762-0cf5-444e-a4c0-28e812f435fa\") " pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:11 crc kubenswrapper[4877]: I0128 16:47:11.095397 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:11 crc kubenswrapper[4877]: E0128 16:47:11.120094 4877 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-mzftl_openshift-operators_d18fe762-0cf5-444e-a4c0-28e812f435fa_0(d628f7a8c0c6b120ac198b5026b07c7f88dc97253738fade561bf8d05f3b92ab): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 16:47:11 crc kubenswrapper[4877]: E0128 16:47:11.120168 4877 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-mzftl_openshift-operators_d18fe762-0cf5-444e-a4c0-28e812f435fa_0(d628f7a8c0c6b120ac198b5026b07c7f88dc97253738fade561bf8d05f3b92ab): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:11 crc kubenswrapper[4877]: E0128 16:47:11.120194 4877 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-mzftl_openshift-operators_d18fe762-0cf5-444e-a4c0-28e812f435fa_0(d628f7a8c0c6b120ac198b5026b07c7f88dc97253738fade561bf8d05f3b92ab): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:11 crc kubenswrapper[4877]: E0128 16:47:11.120246 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5bf474d74f-mzftl_openshift-operators(d18fe762-0cf5-444e-a4c0-28e812f435fa)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5bf474d74f-mzftl_openshift-operators(d18fe762-0cf5-444e-a4c0-28e812f435fa)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-mzftl_openshift-operators_d18fe762-0cf5-444e-a4c0-28e812f435fa_0(d628f7a8c0c6b120ac198b5026b07c7f88dc97253738fade561bf8d05f3b92ab): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" podUID="d18fe762-0cf5-444e-a4c0-28e812f435fa" Jan 28 16:47:12 crc kubenswrapper[4877]: I0128 16:47:12.844101 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" event={"ID":"c26f4211-6c76-421f-add4-6d58385c9219","Type":"ContainerStarted","Data":"eb7353a46870031e5eed8fb9da70ccbf18b6461d0d41efc14fbc3bab9901af2e"} Jan 28 16:47:14 crc kubenswrapper[4877]: I0128 16:47:14.860118 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" event={"ID":"c26f4211-6c76-421f-add4-6d58385c9219","Type":"ContainerStarted","Data":"3cdabc71fbe5da05a2d5970d8bec53604c3214e3e2096bc322025fc322eaa896"} Jan 28 16:47:14 crc kubenswrapper[4877]: I0128 16:47:14.861651 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:14 crc kubenswrapper[4877]: I0128 16:47:14.861918 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:14 crc kubenswrapper[4877]: I0128 16:47:14.861986 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:14 crc kubenswrapper[4877]: I0128 16:47:14.894321 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:14 crc kubenswrapper[4877]: I0128 16:47:14.895096 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:14 crc kubenswrapper[4877]: I0128 16:47:14.922391 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" podStartSLOduration=7.922365034 podStartE2EDuration="7.922365034s" podCreationTimestamp="2026-01-28 16:47:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:47:14.912089225 +0000 UTC m=+738.470416113" watchObservedRunningTime="2026-01-28 16:47:14.922365034 +0000 UTC m=+738.480691942" Jan 28 16:47:15 crc kubenswrapper[4877]: I0128 16:47:15.792454 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj"] Jan 28 16:47:15 crc kubenswrapper[4877]: I0128 16:47:15.792632 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" Jan 28 16:47:15 crc kubenswrapper[4877]: I0128 16:47:15.793189 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" Jan 28 16:47:15 crc kubenswrapper[4877]: I0128 16:47:15.822334 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54"] Jan 28 16:47:15 crc kubenswrapper[4877]: I0128 16:47:15.822560 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" Jan 28 16:47:15 crc kubenswrapper[4877]: I0128 16:47:15.823204 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.831386 4877 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj_openshift-operators_2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed_0(25072e9ce2df96eb81f354d743f3e53385191268cc82e861d24401e7e707d292): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.831514 4877 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj_openshift-operators_2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed_0(25072e9ce2df96eb81f354d743f3e53385191268cc82e861d24401e7e707d292): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.831550 4877 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj_openshift-operators_2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed_0(25072e9ce2df96eb81f354d743f3e53385191268cc82e861d24401e7e707d292): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.831613 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj_openshift-operators(2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj_openshift-operators(2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj_openshift-operators_2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed_0(25072e9ce2df96eb81f354d743f3e53385191268cc82e861d24401e7e707d292): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" podUID="2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed" Jan 28 16:47:15 crc kubenswrapper[4877]: I0128 16:47:15.853386 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-2cprx"] Jan 28 16:47:15 crc kubenswrapper[4877]: I0128 16:47:15.853898 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:15 crc kubenswrapper[4877]: I0128 16:47:15.854571 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:15 crc kubenswrapper[4877]: I0128 16:47:15.859669 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-mzftl"] Jan 28 16:47:15 crc kubenswrapper[4877]: I0128 16:47:15.859892 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:15 crc kubenswrapper[4877]: I0128 16:47:15.864055 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:15 crc kubenswrapper[4877]: I0128 16:47:15.864977 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf"] Jan 28 16:47:15 crc kubenswrapper[4877]: I0128 16:47:15.865168 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" Jan 28 16:47:15 crc kubenswrapper[4877]: I0128 16:47:15.867189 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.897332 4877 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58c86d7846-krh54_openshift-operators_2e3baef9-6c5f-425f-8a26-a790b5e2d1e8_0(e3f217c4f9e7d54d4c04cf9ae5f28cbe2962c24679bc44dc0a2a1a9ad3d5e0ca): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.897411 4877 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58c86d7846-krh54_openshift-operators_2e3baef9-6c5f-425f-8a26-a790b5e2d1e8_0(e3f217c4f9e7d54d4c04cf9ae5f28cbe2962c24679bc44dc0a2a1a9ad3d5e0ca): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.897437 4877 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58c86d7846-krh54_openshift-operators_2e3baef9-6c5f-425f-8a26-a790b5e2d1e8_0(e3f217c4f9e7d54d4c04cf9ae5f28cbe2962c24679bc44dc0a2a1a9ad3d5e0ca): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.897499 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-58c86d7846-krh54_openshift-operators(2e3baef9-6c5f-425f-8a26-a790b5e2d1e8)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-58c86d7846-krh54_openshift-operators(2e3baef9-6c5f-425f-8a26-a790b5e2d1e8)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58c86d7846-krh54_openshift-operators_2e3baef9-6c5f-425f-8a26-a790b5e2d1e8_0(e3f217c4f9e7d54d4c04cf9ae5f28cbe2962c24679bc44dc0a2a1a9ad3d5e0ca): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" podUID="2e3baef9-6c5f-425f-8a26-a790b5e2d1e8" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.945084 4877 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-2cprx_openshift-operators_38cddd18-356a-4be1-8e45-b908361805bf_0(7cc42108ba08b05073cd01fe2db7d93f25943486b4804ffcbfa843ce3c9cfdc9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.945168 4877 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-2cprx_openshift-operators_38cddd18-356a-4be1-8e45-b908361805bf_0(7cc42108ba08b05073cd01fe2db7d93f25943486b4804ffcbfa843ce3c9cfdc9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.945198 4877 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-2cprx_openshift-operators_38cddd18-356a-4be1-8e45-b908361805bf_0(7cc42108ba08b05073cd01fe2db7d93f25943486b4804ffcbfa843ce3c9cfdc9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.945250 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-59bdc8b94-2cprx_openshift-operators(38cddd18-356a-4be1-8e45-b908361805bf)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-59bdc8b94-2cprx_openshift-operators(38cddd18-356a-4be1-8e45-b908361805bf)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-59bdc8b94-2cprx_openshift-operators_38cddd18-356a-4be1-8e45-b908361805bf_0(7cc42108ba08b05073cd01fe2db7d93f25943486b4804ffcbfa843ce3c9cfdc9): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" podUID="38cddd18-356a-4be1-8e45-b908361805bf" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.972758 4877 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-mzftl_openshift-operators_d18fe762-0cf5-444e-a4c0-28e812f435fa_0(37f87668eb5186afb2474a9d14dc529fc71f47102bf94d1fa59fa3b9921e8a7a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.972859 4877 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-mzftl_openshift-operators_d18fe762-0cf5-444e-a4c0-28e812f435fa_0(37f87668eb5186afb2474a9d14dc529fc71f47102bf94d1fa59fa3b9921e8a7a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.972895 4877 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-mzftl_openshift-operators_d18fe762-0cf5-444e-a4c0-28e812f435fa_0(37f87668eb5186afb2474a9d14dc529fc71f47102bf94d1fa59fa3b9921e8a7a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.972952 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5bf474d74f-mzftl_openshift-operators(d18fe762-0cf5-444e-a4c0-28e812f435fa)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5bf474d74f-mzftl_openshift-operators(d18fe762-0cf5-444e-a4c0-28e812f435fa)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5bf474d74f-mzftl_openshift-operators_d18fe762-0cf5-444e-a4c0-28e812f435fa_0(37f87668eb5186afb2474a9d14dc529fc71f47102bf94d1fa59fa3b9921e8a7a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" podUID="d18fe762-0cf5-444e-a4c0-28e812f435fa" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.978818 4877 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-c6nnf_openshift-operators_041bbf13-d52e-4aae-9750-b20b0e9558cb_0(e197e28affa32c1c4c73974371d660323e28c2f37a672a640c791528502405b5): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.978953 4877 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-c6nnf_openshift-operators_041bbf13-d52e-4aae-9750-b20b0e9558cb_0(e197e28affa32c1c4c73974371d660323e28c2f37a672a640c791528502405b5): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.978980 4877 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-c6nnf_openshift-operators_041bbf13-d52e-4aae-9750-b20b0e9558cb_0(e197e28affa32c1c4c73974371d660323e28c2f37a672a640c791528502405b5): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" Jan 28 16:47:15 crc kubenswrapper[4877]: E0128 16:47:15.979052 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-68bc856cb9-c6nnf_openshift-operators(041bbf13-d52e-4aae-9750-b20b0e9558cb)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-68bc856cb9-c6nnf_openshift-operators(041bbf13-d52e-4aae-9750-b20b0e9558cb)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-68bc856cb9-c6nnf_openshift-operators_041bbf13-d52e-4aae-9750-b20b0e9558cb_0(e197e28affa32c1c4c73974371d660323e28c2f37a672a640c791528502405b5): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" podUID="041bbf13-d52e-4aae-9750-b20b0e9558cb" Jan 28 16:47:21 crc kubenswrapper[4877]: I0128 16:47:21.331781 4877 scope.go:117] "RemoveContainer" containerID="1a806e67e9fc104f5c007ae476ce9c24b6f511eb3bfb6094c15c3872b5d991f7" Jan 28 16:47:21 crc kubenswrapper[4877]: I0128 16:47:21.919332 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hbxsq_2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a/kube-multus/2.log" Jan 28 16:47:21 crc kubenswrapper[4877]: I0128 16:47:21.919931 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hbxsq" event={"ID":"2a17664e-66c0-4a75-9ac1-50ac0f8f0c7a","Type":"ContainerStarted","Data":"00adbcac12c81ff38ee107765e529a85617ba65e79db3c4e73db7c6c3c0c2d64"} Jan 28 16:47:27 crc kubenswrapper[4877]: I0128 16:47:27.336638 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" Jan 28 16:47:27 crc kubenswrapper[4877]: I0128 16:47:27.338576 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" Jan 28 16:47:27 crc kubenswrapper[4877]: I0128 16:47:27.644274 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54"] Jan 28 16:47:27 crc kubenswrapper[4877]: I0128 16:47:27.961188 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" event={"ID":"2e3baef9-6c5f-425f-8a26-a790b5e2d1e8","Type":"ContainerStarted","Data":"90494331fd725ded3cab2cd064eed3a8a0ba775f2ab6e6613af64c0b380206c4"} Jan 28 16:47:29 crc kubenswrapper[4877]: I0128 16:47:29.329554 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:29 crc kubenswrapper[4877]: I0128 16:47:29.329592 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" Jan 28 16:47:29 crc kubenswrapper[4877]: I0128 16:47:29.329588 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:29 crc kubenswrapper[4877]: I0128 16:47:29.330732 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:29 crc kubenswrapper[4877]: I0128 16:47:29.330746 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:29 crc kubenswrapper[4877]: I0128 16:47:29.331197 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" Jan 28 16:47:29 crc kubenswrapper[4877]: I0128 16:47:29.865553 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-mzftl"] Jan 28 16:47:29 crc kubenswrapper[4877]: I0128 16:47:29.879700 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-2cprx"] Jan 28 16:47:29 crc kubenswrapper[4877]: W0128 16:47:29.880561 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd18fe762_0cf5_444e_a4c0_28e812f435fa.slice/crio-c2686633489fa04d25d7c54f4134dc272b3feb3cb98ef841a219cb46045c0bd8 WatchSource:0}: Error finding container c2686633489fa04d25d7c54f4134dc272b3feb3cb98ef841a219cb46045c0bd8: Status 404 returned error can't find the container with id c2686633489fa04d25d7c54f4134dc272b3feb3cb98ef841a219cb46045c0bd8 Jan 28 16:47:29 crc kubenswrapper[4877]: W0128 16:47:29.893813 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod38cddd18_356a_4be1_8e45_b908361805bf.slice/crio-4a420a284943af9919cd4aa8e9e86bddce0d0bec4cfe85876cffa1a71176d5ee WatchSource:0}: Error finding container 4a420a284943af9919cd4aa8e9e86bddce0d0bec4cfe85876cffa1a71176d5ee: Status 404 returned error can't find the container with id 4a420a284943af9919cd4aa8e9e86bddce0d0bec4cfe85876cffa1a71176d5ee Jan 28 16:47:29 crc kubenswrapper[4877]: I0128 16:47:29.968886 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj"] Jan 28 16:47:29 crc kubenswrapper[4877]: I0128 16:47:29.984171 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" event={"ID":"d18fe762-0cf5-444e-a4c0-28e812f435fa","Type":"ContainerStarted","Data":"c2686633489fa04d25d7c54f4134dc272b3feb3cb98ef841a219cb46045c0bd8"} Jan 28 16:47:29 crc kubenswrapper[4877]: I0128 16:47:29.985140 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" event={"ID":"38cddd18-356a-4be1-8e45-b908361805bf","Type":"ContainerStarted","Data":"4a420a284943af9919cd4aa8e9e86bddce0d0bec4cfe85876cffa1a71176d5ee"} Jan 28 16:47:30 crc kubenswrapper[4877]: I0128 16:47:30.330103 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" Jan 28 16:47:30 crc kubenswrapper[4877]: I0128 16:47:30.330823 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" Jan 28 16:47:30 crc kubenswrapper[4877]: I0128 16:47:30.996860 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" event={"ID":"2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed","Type":"ContainerStarted","Data":"0f20ec09d6a29590489a85aeb4e1d0c73458b81831f694fa15b662a2ea3be37a"} Jan 28 16:47:31 crc kubenswrapper[4877]: I0128 16:47:31.215792 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf"] Jan 28 16:47:32 crc kubenswrapper[4877]: I0128 16:47:32.006834 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" event={"ID":"041bbf13-d52e-4aae-9750-b20b0e9558cb","Type":"ContainerStarted","Data":"cdd80673de4bb53a44420bc43a3bcffae9bb02b8aacbe9abc119df5da8496526"} Jan 28 16:47:33 crc kubenswrapper[4877]: I0128 16:47:33.437613 4877 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 28 16:47:37 crc kubenswrapper[4877]: I0128 16:47:37.076902 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:47:37 crc kubenswrapper[4877]: I0128 16:47:37.078959 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:47:38 crc kubenswrapper[4877]: I0128 16:47:38.012391 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-8slsx" Jan 28 16:47:38 crc kubenswrapper[4877]: I0128 16:47:38.062855 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" event={"ID":"2e3baef9-6c5f-425f-8a26-a790b5e2d1e8","Type":"ContainerStarted","Data":"bdcb6db63956e2d22752f17d7b89ff8311e2d030d4443c431737d9fe913de255"} Jan 28 16:47:38 crc kubenswrapper[4877]: I0128 16:47:38.088526 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-krh54" podStartSLOduration=18.283015893 podStartE2EDuration="28.088505657s" podCreationTimestamp="2026-01-28 16:47:10 +0000 UTC" firstStartedPulling="2026-01-28 16:47:27.66623143 +0000 UTC m=+751.224558318" lastFinishedPulling="2026-01-28 16:47:37.471721194 +0000 UTC m=+761.030048082" observedRunningTime="2026-01-28 16:47:38.0879073 +0000 UTC m=+761.646234188" watchObservedRunningTime="2026-01-28 16:47:38.088505657 +0000 UTC m=+761.646832545" Jan 28 16:47:58 crc kubenswrapper[4877]: E0128 16:47:58.501490 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" 
image="registry.redhat.io/cluster-observability-operator/obo-prometheus-rhel9-operator@sha256:e7e5f4c5e8ab0ba298ef0295a7137d438a42eb177d9322212cde6ba8f367912a" Jan 28 16:47:58 crc kubenswrapper[4877]: E0128 16:47:58.502754 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:prometheus-operator,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-rhel9-operator@sha256:e7e5f4c5e8ab0ba298ef0295a7137d438a42eb177d9322212cde6ba8f367912a,Command:[],Args:[--prometheus-config-reloader=$(RELATED_IMAGE_PROMETHEUS_CONFIG_RELOADER) --prometheus-instance-selector=app.kubernetes.io/managed-by=observability-operator --alertmanager-instance-selector=app.kubernetes.io/managed-by=observability-operator --thanos-ruler-instance-selector=app.kubernetes.io/managed-by=observability-operator --watch-referenced-objects-in-all-namespaces=true --disable-unmanaged-prometheus-configuration=true],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:GOGC,Value:30,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PROMETHEUS_CONFIG_RELOADER,Value:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:9a2097bc5b2e02bc1703f64c452ce8fe4bc6775b732db930ff4770b76ae4653a,ValueFrom:nil,},EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.1,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{157286400 0} {} 150Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jbzcv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod obo-prometheus-operator-68bc856cb9-c6nnf_openshift-operators(041bbf13-d52e-4aae-9750-b20b0e9558cb): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 16:47:58 crc kubenswrapper[4877]: E0128 16:47:58.504059 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" podUID="041bbf13-d52e-4aae-9750-b20b0e9558cb" Jan 28 16:47:59 crc kubenswrapper[4877]: I0128 16:47:59.239765 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" 
event={"ID":"38cddd18-356a-4be1-8e45-b908361805bf","Type":"ContainerStarted","Data":"e4ecdf287ef2bdcfb23c6cc487ef20f19c918b1cb2b6371ce46cf5b910faaa7d"} Jan 28 16:47:59 crc kubenswrapper[4877]: I0128 16:47:59.240838 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:59 crc kubenswrapper[4877]: I0128 16:47:59.243314 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" event={"ID":"2a4f94f4-5aa0-47cf-a705-adbf4cfb51ed","Type":"ContainerStarted","Data":"127a0165740e02983e163ce8793269d2893be5d1c9242b00851d8243f794efa2"} Jan 28 16:47:59 crc kubenswrapper[4877]: I0128 16:47:59.246949 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" event={"ID":"d18fe762-0cf5-444e-a4c0-28e812f435fa","Type":"ContainerStarted","Data":"24a37754b462db7ec6d4281795a044f0057fa6ecc81422ac2b721a0e7609536e"} Jan 28 16:47:59 crc kubenswrapper[4877]: I0128 16:47:59.247048 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:47:59 crc kubenswrapper[4877]: E0128 16:47:59.247420 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/obo-prometheus-rhel9-operator@sha256:e7e5f4c5e8ab0ba298ef0295a7137d438a42eb177d9322212cde6ba8f367912a\\\"\"" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" podUID="041bbf13-d52e-4aae-9750-b20b0e9558cb" Jan 28 16:47:59 crc kubenswrapper[4877]: I0128 16:47:59.254405 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" Jan 28 16:47:59 crc kubenswrapper[4877]: I0128 16:47:59.283280 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" podStartSLOduration=20.650402914 podStartE2EDuration="49.283256411s" podCreationTimestamp="2026-01-28 16:47:10 +0000 UTC" firstStartedPulling="2026-01-28 16:47:29.898588464 +0000 UTC m=+753.456915372" lastFinishedPulling="2026-01-28 16:47:58.531441981 +0000 UTC m=+782.089768869" observedRunningTime="2026-01-28 16:47:59.277704581 +0000 UTC m=+782.836031509" watchObservedRunningTime="2026-01-28 16:47:59.283256411 +0000 UTC m=+782.841583299" Jan 28 16:47:59 crc kubenswrapper[4877]: I0128 16:47:59.385756 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58c86d7846-4nmqj" podStartSLOduration=21.873022769 podStartE2EDuration="49.385729183s" podCreationTimestamp="2026-01-28 16:47:10 +0000 UTC" firstStartedPulling="2026-01-28 16:47:30.981055678 +0000 UTC m=+754.539382566" lastFinishedPulling="2026-01-28 16:47:58.493762092 +0000 UTC m=+782.052088980" observedRunningTime="2026-01-28 16:47:59.377037787 +0000 UTC m=+782.935364675" watchObservedRunningTime="2026-01-28 16:47:59.385729183 +0000 UTC m=+782.944056071" Jan 28 16:47:59 crc kubenswrapper[4877]: I0128 16:47:59.385965 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" podStartSLOduration=20.774856449 podStartE2EDuration="49.385958129s" podCreationTimestamp="2026-01-28 16:47:10 +0000 
UTC" firstStartedPulling="2026-01-28 16:47:29.884944843 +0000 UTC m=+753.443271721" lastFinishedPulling="2026-01-28 16:47:58.496046513 +0000 UTC m=+782.054373401" observedRunningTime="2026-01-28 16:47:59.352221527 +0000 UTC m=+782.910548435" watchObservedRunningTime="2026-01-28 16:47:59.385958129 +0000 UTC m=+782.944285017" Jan 28 16:48:07 crc kubenswrapper[4877]: I0128 16:48:07.076124 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:48:07 crc kubenswrapper[4877]: I0128 16:48:07.076543 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:48:07 crc kubenswrapper[4877]: I0128 16:48:07.076602 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:48:07 crc kubenswrapper[4877]: I0128 16:48:07.077393 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"009b4d91077431e907042d1b0b9c62d4fcd7c95df803c80a1b641a0306375a2c"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 16:48:07 crc kubenswrapper[4877]: I0128 16:48:07.077457 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" containerID="cri-o://009b4d91077431e907042d1b0b9c62d4fcd7c95df803c80a1b641a0306375a2c" gracePeriod=600 Jan 28 16:48:07 crc kubenswrapper[4877]: I0128 16:48:07.311548 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="009b4d91077431e907042d1b0b9c62d4fcd7c95df803c80a1b641a0306375a2c" exitCode=0 Jan 28 16:48:07 crc kubenswrapper[4877]: I0128 16:48:07.311986 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"009b4d91077431e907042d1b0b9c62d4fcd7c95df803c80a1b641a0306375a2c"} Jan 28 16:48:07 crc kubenswrapper[4877]: I0128 16:48:07.312042 4877 scope.go:117] "RemoveContainer" containerID="f87fd390161fe2cfa7f8b535b8c64bf410aed6149ff27de8dbf9c3d787641d32" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.320700 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"50d22942424d37ecd19189f5b9ed73adaeed0500bb228f84257ef8a11bc4937c"} Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.541933 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-858654f9db-49nd6"] Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.542970 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-858654f9db-49nd6" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.548263 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.548300 4877 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-d9kvq" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.548628 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.569689 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-49nd6"] Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.582557 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-5qfgr"] Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.583776 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-5qfgr" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.588010 4877 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-2s26d" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.589949 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-42v7p"] Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.591193 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-42v7p" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.599187 4877 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-7r86c" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.623581 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l77sz\" (UniqueName: \"kubernetes.io/projected/90191161-1dd9-43e3-9426-506643d8274c-kube-api-access-l77sz\") pod \"cert-manager-858654f9db-49nd6\" (UID: \"90191161-1dd9-43e3-9426-506643d8274c\") " pod="cert-manager/cert-manager-858654f9db-49nd6" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.635382 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-5qfgr"] Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.668709 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-42v7p"] Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.725988 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghjld\" (UniqueName: \"kubernetes.io/projected/c963fe29-366b-4362-9a13-89423728237d-kube-api-access-ghjld\") pod \"cert-manager-webhook-687f57d79b-42v7p\" (UID: \"c963fe29-366b-4362-9a13-89423728237d\") " pod="cert-manager/cert-manager-webhook-687f57d79b-42v7p" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.726152 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l77sz\" (UniqueName: \"kubernetes.io/projected/90191161-1dd9-43e3-9426-506643d8274c-kube-api-access-l77sz\") pod \"cert-manager-858654f9db-49nd6\" (UID: \"90191161-1dd9-43e3-9426-506643d8274c\") " pod="cert-manager/cert-manager-858654f9db-49nd6" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 
16:48:08.726219 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dfmm\" (UniqueName: \"kubernetes.io/projected/f9b56dd3-efef-4366-bd6d-63ff5a46342c-kube-api-access-7dfmm\") pod \"cert-manager-cainjector-cf98fcc89-5qfgr\" (UID: \"f9b56dd3-efef-4366-bd6d-63ff5a46342c\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-5qfgr" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.747669 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l77sz\" (UniqueName: \"kubernetes.io/projected/90191161-1dd9-43e3-9426-506643d8274c-kube-api-access-l77sz\") pod \"cert-manager-858654f9db-49nd6\" (UID: \"90191161-1dd9-43e3-9426-506643d8274c\") " pod="cert-manager/cert-manager-858654f9db-49nd6" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.828606 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghjld\" (UniqueName: \"kubernetes.io/projected/c963fe29-366b-4362-9a13-89423728237d-kube-api-access-ghjld\") pod \"cert-manager-webhook-687f57d79b-42v7p\" (UID: \"c963fe29-366b-4362-9a13-89423728237d\") " pod="cert-manager/cert-manager-webhook-687f57d79b-42v7p" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.828756 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dfmm\" (UniqueName: \"kubernetes.io/projected/f9b56dd3-efef-4366-bd6d-63ff5a46342c-kube-api-access-7dfmm\") pod \"cert-manager-cainjector-cf98fcc89-5qfgr\" (UID: \"f9b56dd3-efef-4366-bd6d-63ff5a46342c\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-5qfgr" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.851169 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dfmm\" (UniqueName: \"kubernetes.io/projected/f9b56dd3-efef-4366-bd6d-63ff5a46342c-kube-api-access-7dfmm\") pod \"cert-manager-cainjector-cf98fcc89-5qfgr\" (UID: \"f9b56dd3-efef-4366-bd6d-63ff5a46342c\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-5qfgr" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.851328 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghjld\" (UniqueName: \"kubernetes.io/projected/c963fe29-366b-4362-9a13-89423728237d-kube-api-access-ghjld\") pod \"cert-manager-webhook-687f57d79b-42v7p\" (UID: \"c963fe29-366b-4362-9a13-89423728237d\") " pod="cert-manager/cert-manager-webhook-687f57d79b-42v7p" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.865285 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-49nd6" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.910502 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-5qfgr" Jan 28 16:48:08 crc kubenswrapper[4877]: I0128 16:48:08.953899 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-42v7p" Jan 28 16:48:09 crc kubenswrapper[4877]: I0128 16:48:09.183556 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-5qfgr"] Jan 28 16:48:09 crc kubenswrapper[4877]: I0128 16:48:09.257767 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-42v7p"] Jan 28 16:48:09 crc kubenswrapper[4877]: W0128 16:48:09.260575 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc963fe29_366b_4362_9a13_89423728237d.slice/crio-a502cdd3867d1e131eb2d4d3049010cf564bc2edd0620d3d3bac51a253d9cde4 WatchSource:0}: Error finding container a502cdd3867d1e131eb2d4d3049010cf564bc2edd0620d3d3bac51a253d9cde4: Status 404 returned error can't find the container with id a502cdd3867d1e131eb2d4d3049010cf564bc2edd0620d3d3bac51a253d9cde4 Jan 28 16:48:09 crc kubenswrapper[4877]: I0128 16:48:09.310299 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-49nd6"] Jan 28 16:48:09 crc kubenswrapper[4877]: W0128 16:48:09.311893 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod90191161_1dd9_43e3_9426_506643d8274c.slice/crio-727786bda9ff338a05966d811036842eddb46c7d61b1081321910f3499a21573 WatchSource:0}: Error finding container 727786bda9ff338a05966d811036842eddb46c7d61b1081321910f3499a21573: Status 404 returned error can't find the container with id 727786bda9ff338a05966d811036842eddb46c7d61b1081321910f3499a21573 Jan 28 16:48:09 crc kubenswrapper[4877]: I0128 16:48:09.329265 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-49nd6" event={"ID":"90191161-1dd9-43e3-9426-506643d8274c","Type":"ContainerStarted","Data":"727786bda9ff338a05966d811036842eddb46c7d61b1081321910f3499a21573"} Jan 28 16:48:09 crc kubenswrapper[4877]: I0128 16:48:09.341822 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-42v7p" event={"ID":"c963fe29-366b-4362-9a13-89423728237d","Type":"ContainerStarted","Data":"a502cdd3867d1e131eb2d4d3049010cf564bc2edd0620d3d3bac51a253d9cde4"} Jan 28 16:48:09 crc kubenswrapper[4877]: I0128 16:48:09.341884 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-5qfgr" event={"ID":"f9b56dd3-efef-4366-bd6d-63ff5a46342c","Type":"ContainerStarted","Data":"83e65788c71657bd5438210f64fbceac1a9c14bf81c8648478b08564fb93701a"} Jan 28 16:48:11 crc kubenswrapper[4877]: I0128 16:48:11.098916 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" Jan 28 16:48:17 crc kubenswrapper[4877]: I0128 16:48:17.401228 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" event={"ID":"041bbf13-d52e-4aae-9750-b20b0e9558cb","Type":"ContainerStarted","Data":"67ad441a4746bdb677ba858abfe8d4b37a42b5360ae26033947aeb4bfcd8d1e1"} Jan 28 16:48:17 crc kubenswrapper[4877]: I0128 16:48:17.429451 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-c6nnf" podStartSLOduration=22.080906019 podStartE2EDuration="1m7.42941491s" podCreationTimestamp="2026-01-28 16:47:10 +0000 UTC" firstStartedPulling="2026-01-28 
16:47:31.234848479 +0000 UTC m=+754.793175367" lastFinishedPulling="2026-01-28 16:48:16.58335736 +0000 UTC m=+800.141684258" observedRunningTime="2026-01-28 16:48:17.422121992 +0000 UTC m=+800.980448880" watchObservedRunningTime="2026-01-28 16:48:17.42941491 +0000 UTC m=+800.987741788" Jan 28 16:48:21 crc kubenswrapper[4877]: I0128 16:48:21.453421 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-42v7p" event={"ID":"c963fe29-366b-4362-9a13-89423728237d","Type":"ContainerStarted","Data":"ded2b51952774059d2bcb4bd64da605d211dd5541170ffcc424483101ea618f6"} Jan 28 16:48:21 crc kubenswrapper[4877]: I0128 16:48:21.454190 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-687f57d79b-42v7p" Jan 28 16:48:21 crc kubenswrapper[4877]: I0128 16:48:21.455551 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-5qfgr" event={"ID":"f9b56dd3-efef-4366-bd6d-63ff5a46342c","Type":"ContainerStarted","Data":"2afbf7bc636f0d722819f774b3a149c3865d1b4e1516265db090a2e0c88d3a6e"} Jan 28 16:48:21 crc kubenswrapper[4877]: I0128 16:48:21.457167 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-49nd6" event={"ID":"90191161-1dd9-43e3-9426-506643d8274c","Type":"ContainerStarted","Data":"7960a831c1d4cbe4c8c7cc119704e2fdad15f5f105d3849b5c34b7526dd6c2b6"} Jan 28 16:48:21 crc kubenswrapper[4877]: I0128 16:48:21.475227 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-687f57d79b-42v7p" podStartSLOduration=2.159532443 podStartE2EDuration="13.475193698s" podCreationTimestamp="2026-01-28 16:48:08 +0000 UTC" firstStartedPulling="2026-01-28 16:48:09.263136582 +0000 UTC m=+792.821463470" lastFinishedPulling="2026-01-28 16:48:20.578797837 +0000 UTC m=+804.137124725" observedRunningTime="2026-01-28 16:48:21.472796373 +0000 UTC m=+805.031123261" watchObservedRunningTime="2026-01-28 16:48:21.475193698 +0000 UTC m=+805.033520576" Jan 28 16:48:21 crc kubenswrapper[4877]: I0128 16:48:21.498593 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-858654f9db-49nd6" podStartSLOduration=2.13057232 podStartE2EDuration="13.49856405s" podCreationTimestamp="2026-01-28 16:48:08 +0000 UTC" firstStartedPulling="2026-01-28 16:48:09.314520482 +0000 UTC m=+792.872847370" lastFinishedPulling="2026-01-28 16:48:20.682512212 +0000 UTC m=+804.240839100" observedRunningTime="2026-01-28 16:48:21.492326961 +0000 UTC m=+805.050653859" watchObservedRunningTime="2026-01-28 16:48:21.49856405 +0000 UTC m=+805.056890938" Jan 28 16:48:21 crc kubenswrapper[4877]: I0128 16:48:21.523596 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-5qfgr" podStartSLOduration=2.144012185 podStartE2EDuration="13.523566767s" podCreationTimestamp="2026-01-28 16:48:08 +0000 UTC" firstStartedPulling="2026-01-28 16:48:09.192201705 +0000 UTC m=+792.750528593" lastFinishedPulling="2026-01-28 16:48:20.571756287 +0000 UTC m=+804.130083175" observedRunningTime="2026-01-28 16:48:21.506528526 +0000 UTC m=+805.064855414" watchObservedRunningTime="2026-01-28 16:48:21.523566767 +0000 UTC m=+805.081893655" Jan 28 16:48:28 crc kubenswrapper[4877]: I0128 16:48:28.958303 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-687f57d79b-42v7p" Jan 28 
16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.628141 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r"] Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.630293 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.633269 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.640337 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r"] Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.741102 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ddb4f7da-694e-4169-b483-858c938ecaeb-bundle\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r\" (UID: \"ddb4f7da-694e-4169-b483-858c938ecaeb\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.741202 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2csgx\" (UniqueName: \"kubernetes.io/projected/ddb4f7da-694e-4169-b483-858c938ecaeb-kube-api-access-2csgx\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r\" (UID: \"ddb4f7da-694e-4169-b483-858c938ecaeb\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.741252 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ddb4f7da-694e-4169-b483-858c938ecaeb-util\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r\" (UID: \"ddb4f7da-694e-4169-b483-858c938ecaeb\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.816620 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4"] Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.817963 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.838117 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4"] Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.843194 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ddb4f7da-694e-4169-b483-858c938ecaeb-bundle\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r\" (UID: \"ddb4f7da-694e-4169-b483-858c938ecaeb\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.843246 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2csgx\" (UniqueName: \"kubernetes.io/projected/ddb4f7da-694e-4169-b483-858c938ecaeb-kube-api-access-2csgx\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r\" (UID: \"ddb4f7da-694e-4169-b483-858c938ecaeb\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.843289 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ddb4f7da-694e-4169-b483-858c938ecaeb-util\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r\" (UID: \"ddb4f7da-694e-4169-b483-858c938ecaeb\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.844170 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ddb4f7da-694e-4169-b483-858c938ecaeb-util\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r\" (UID: \"ddb4f7da-694e-4169-b483-858c938ecaeb\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.844382 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ddb4f7da-694e-4169-b483-858c938ecaeb-bundle\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r\" (UID: \"ddb4f7da-694e-4169-b483-858c938ecaeb\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.872350 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2csgx\" (UniqueName: \"kubernetes.io/projected/ddb4f7da-694e-4169-b483-858c938ecaeb-kube-api-access-2csgx\") pod \"19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r\" (UID: \"ddb4f7da-694e-4169-b483-858c938ecaeb\") " pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.944777 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fg7g\" (UniqueName: \"kubernetes.io/projected/00066e1b-d4d0-4b82-ae77-5ec880ed600d-kube-api-access-2fg7g\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4\" (UID: \"00066e1b-d4d0-4b82-ae77-5ec880ed600d\") " 
pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.944851 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/00066e1b-d4d0-4b82-ae77-5ec880ed600d-bundle\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4\" (UID: \"00066e1b-d4d0-4b82-ae77-5ec880ed600d\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.944905 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/00066e1b-d4d0-4b82-ae77-5ec880ed600d-util\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4\" (UID: \"00066e1b-d4d0-4b82-ae77-5ec880ed600d\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" Jan 28 16:48:50 crc kubenswrapper[4877]: I0128 16:48:50.961031 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" Jan 28 16:48:51 crc kubenswrapper[4877]: I0128 16:48:51.047139 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/00066e1b-d4d0-4b82-ae77-5ec880ed600d-util\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4\" (UID: \"00066e1b-d4d0-4b82-ae77-5ec880ed600d\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" Jan 28 16:48:51 crc kubenswrapper[4877]: I0128 16:48:51.047665 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fg7g\" (UniqueName: \"kubernetes.io/projected/00066e1b-d4d0-4b82-ae77-5ec880ed600d-kube-api-access-2fg7g\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4\" (UID: \"00066e1b-d4d0-4b82-ae77-5ec880ed600d\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" Jan 28 16:48:51 crc kubenswrapper[4877]: I0128 16:48:51.047720 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/00066e1b-d4d0-4b82-ae77-5ec880ed600d-bundle\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4\" (UID: \"00066e1b-d4d0-4b82-ae77-5ec880ed600d\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" Jan 28 16:48:51 crc kubenswrapper[4877]: I0128 16:48:51.051593 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/00066e1b-d4d0-4b82-ae77-5ec880ed600d-util\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4\" (UID: \"00066e1b-d4d0-4b82-ae77-5ec880ed600d\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" Jan 28 16:48:51 crc kubenswrapper[4877]: I0128 16:48:51.054976 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/00066e1b-d4d0-4b82-ae77-5ec880ed600d-bundle\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4\" (UID: \"00066e1b-d4d0-4b82-ae77-5ec880ed600d\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" Jan 28 
16:48:51 crc kubenswrapper[4877]: I0128 16:48:51.078943 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fg7g\" (UniqueName: \"kubernetes.io/projected/00066e1b-d4d0-4b82-ae77-5ec880ed600d-kube-api-access-2fg7g\") pod \"40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4\" (UID: \"00066e1b-d4d0-4b82-ae77-5ec880ed600d\") " pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" Jan 28 16:48:51 crc kubenswrapper[4877]: I0128 16:48:51.135108 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" Jan 28 16:48:51 crc kubenswrapper[4877]: I0128 16:48:51.226458 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r"] Jan 28 16:48:51 crc kubenswrapper[4877]: I0128 16:48:51.361998 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4"] Jan 28 16:48:51 crc kubenswrapper[4877]: W0128 16:48:51.371769 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod00066e1b_d4d0_4b82_ae77_5ec880ed600d.slice/crio-6eb4403d573f8f560d714da49233fd03b17e64c7a0a65e480a71935e7ca9dfd7 WatchSource:0}: Error finding container 6eb4403d573f8f560d714da49233fd03b17e64c7a0a65e480a71935e7ca9dfd7: Status 404 returned error can't find the container with id 6eb4403d573f8f560d714da49233fd03b17e64c7a0a65e480a71935e7ca9dfd7 Jan 28 16:48:51 crc kubenswrapper[4877]: I0128 16:48:51.704093 4877 generic.go:334] "Generic (PLEG): container finished" podID="00066e1b-d4d0-4b82-ae77-5ec880ed600d" containerID="108545bee23c1f290e2f1f48bf8b12aac2a46247974c43f91d4d454f651b8400" exitCode=0 Jan 28 16:48:51 crc kubenswrapper[4877]: I0128 16:48:51.704215 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" event={"ID":"00066e1b-d4d0-4b82-ae77-5ec880ed600d","Type":"ContainerDied","Data":"108545bee23c1f290e2f1f48bf8b12aac2a46247974c43f91d4d454f651b8400"} Jan 28 16:48:51 crc kubenswrapper[4877]: I0128 16:48:51.704391 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" event={"ID":"00066e1b-d4d0-4b82-ae77-5ec880ed600d","Type":"ContainerStarted","Data":"6eb4403d573f8f560d714da49233fd03b17e64c7a0a65e480a71935e7ca9dfd7"} Jan 28 16:48:51 crc kubenswrapper[4877]: I0128 16:48:51.707862 4877 generic.go:334] "Generic (PLEG): container finished" podID="ddb4f7da-694e-4169-b483-858c938ecaeb" containerID="585d22b40e9a2ca11e23b2ca525609a6735bed5e1cda11f24838da59f0a4b928" exitCode=0 Jan 28 16:48:51 crc kubenswrapper[4877]: I0128 16:48:51.707921 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" event={"ID":"ddb4f7da-694e-4169-b483-858c938ecaeb","Type":"ContainerDied","Data":"585d22b40e9a2ca11e23b2ca525609a6735bed5e1cda11f24838da59f0a4b928"} Jan 28 16:48:51 crc kubenswrapper[4877]: I0128 16:48:51.707958 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" 
event={"ID":"ddb4f7da-694e-4169-b483-858c938ecaeb","Type":"ContainerStarted","Data":"3bc53ca4d11038e781cd387b3cd2156f408f11af9f428c08d9b3d301aed9ae37"} Jan 28 16:48:54 crc kubenswrapper[4877]: I0128 16:48:54.375925 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-btcjg"] Jan 28 16:48:54 crc kubenswrapper[4877]: I0128 16:48:54.378816 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-btcjg" Jan 28 16:48:54 crc kubenswrapper[4877]: I0128 16:48:54.395556 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-btcjg"] Jan 28 16:48:54 crc kubenswrapper[4877]: I0128 16:48:54.513261 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1ab6861-bb44-4191-bd90-93a9899e5ae3-catalog-content\") pod \"redhat-operators-btcjg\" (UID: \"f1ab6861-bb44-4191-bd90-93a9899e5ae3\") " pod="openshift-marketplace/redhat-operators-btcjg" Jan 28 16:48:54 crc kubenswrapper[4877]: I0128 16:48:54.513465 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1ab6861-bb44-4191-bd90-93a9899e5ae3-utilities\") pod \"redhat-operators-btcjg\" (UID: \"f1ab6861-bb44-4191-bd90-93a9899e5ae3\") " pod="openshift-marketplace/redhat-operators-btcjg" Jan 28 16:48:54 crc kubenswrapper[4877]: I0128 16:48:54.513604 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c254m\" (UniqueName: \"kubernetes.io/projected/f1ab6861-bb44-4191-bd90-93a9899e5ae3-kube-api-access-c254m\") pod \"redhat-operators-btcjg\" (UID: \"f1ab6861-bb44-4191-bd90-93a9899e5ae3\") " pod="openshift-marketplace/redhat-operators-btcjg" Jan 28 16:48:54 crc kubenswrapper[4877]: I0128 16:48:54.615520 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1ab6861-bb44-4191-bd90-93a9899e5ae3-utilities\") pod \"redhat-operators-btcjg\" (UID: \"f1ab6861-bb44-4191-bd90-93a9899e5ae3\") " pod="openshift-marketplace/redhat-operators-btcjg" Jan 28 16:48:54 crc kubenswrapper[4877]: I0128 16:48:54.615674 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c254m\" (UniqueName: \"kubernetes.io/projected/f1ab6861-bb44-4191-bd90-93a9899e5ae3-kube-api-access-c254m\") pod \"redhat-operators-btcjg\" (UID: \"f1ab6861-bb44-4191-bd90-93a9899e5ae3\") " pod="openshift-marketplace/redhat-operators-btcjg" Jan 28 16:48:54 crc kubenswrapper[4877]: I0128 16:48:54.615772 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1ab6861-bb44-4191-bd90-93a9899e5ae3-catalog-content\") pod \"redhat-operators-btcjg\" (UID: \"f1ab6861-bb44-4191-bd90-93a9899e5ae3\") " pod="openshift-marketplace/redhat-operators-btcjg" Jan 28 16:48:54 crc kubenswrapper[4877]: I0128 16:48:54.616167 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1ab6861-bb44-4191-bd90-93a9899e5ae3-utilities\") pod \"redhat-operators-btcjg\" (UID: \"f1ab6861-bb44-4191-bd90-93a9899e5ae3\") " pod="openshift-marketplace/redhat-operators-btcjg" Jan 28 16:48:54 crc kubenswrapper[4877]: I0128 16:48:54.616319 4877 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1ab6861-bb44-4191-bd90-93a9899e5ae3-catalog-content\") pod \"redhat-operators-btcjg\" (UID: \"f1ab6861-bb44-4191-bd90-93a9899e5ae3\") " pod="openshift-marketplace/redhat-operators-btcjg" Jan 28 16:48:54 crc kubenswrapper[4877]: I0128 16:48:54.638760 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c254m\" (UniqueName: \"kubernetes.io/projected/f1ab6861-bb44-4191-bd90-93a9899e5ae3-kube-api-access-c254m\") pod \"redhat-operators-btcjg\" (UID: \"f1ab6861-bb44-4191-bd90-93a9899e5ae3\") " pod="openshift-marketplace/redhat-operators-btcjg" Jan 28 16:48:54 crc kubenswrapper[4877]: I0128 16:48:54.705297 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-btcjg" Jan 28 16:48:55 crc kubenswrapper[4877]: I0128 16:48:55.785278 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-btcjg"] Jan 28 16:48:56 crc kubenswrapper[4877]: I0128 16:48:56.753108 4877 generic.go:334] "Generic (PLEG): container finished" podID="f1ab6861-bb44-4191-bd90-93a9899e5ae3" containerID="17af8ef5438f1c9037d50f5f594e6250f22808b482ca729ee4acd102e3958bf9" exitCode=0 Jan 28 16:48:56 crc kubenswrapper[4877]: I0128 16:48:56.753186 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-btcjg" event={"ID":"f1ab6861-bb44-4191-bd90-93a9899e5ae3","Type":"ContainerDied","Data":"17af8ef5438f1c9037d50f5f594e6250f22808b482ca729ee4acd102e3958bf9"} Jan 28 16:48:56 crc kubenswrapper[4877]: I0128 16:48:56.753678 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-btcjg" event={"ID":"f1ab6861-bb44-4191-bd90-93a9899e5ae3","Type":"ContainerStarted","Data":"d0d81ba91779bfbf70056acdb2b0e31ce8a6eca9dd381bb6a0d7f6fbc62d9acc"} Jan 28 16:48:56 crc kubenswrapper[4877]: I0128 16:48:56.758848 4877 generic.go:334] "Generic (PLEG): container finished" podID="00066e1b-d4d0-4b82-ae77-5ec880ed600d" containerID="1b640582fd786aba84b17cc7965d83604807cde36d2d080b5bf9da8f214f2ed6" exitCode=0 Jan 28 16:48:56 crc kubenswrapper[4877]: I0128 16:48:56.759245 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" event={"ID":"00066e1b-d4d0-4b82-ae77-5ec880ed600d","Type":"ContainerDied","Data":"1b640582fd786aba84b17cc7965d83604807cde36d2d080b5bf9da8f214f2ed6"} Jan 28 16:48:56 crc kubenswrapper[4877]: I0128 16:48:56.763952 4877 generic.go:334] "Generic (PLEG): container finished" podID="ddb4f7da-694e-4169-b483-858c938ecaeb" containerID="253fc3fc408180f540be0062bd4f2f7425934750609db6675ec6eb569029c239" exitCode=0 Jan 28 16:48:56 crc kubenswrapper[4877]: I0128 16:48:56.764017 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" event={"ID":"ddb4f7da-694e-4169-b483-858c938ecaeb","Type":"ContainerDied","Data":"253fc3fc408180f540be0062bd4f2f7425934750609db6675ec6eb569029c239"} Jan 28 16:48:57 crc kubenswrapper[4877]: I0128 16:48:57.776090 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" event={"ID":"00066e1b-d4d0-4b82-ae77-5ec880ed600d","Type":"ContainerStarted","Data":"770c1dfab2fe6caf784ca6fc41accb5eaad477f6b0cf62f8c41502df350128b1"} Jan 28 16:48:57 crc 
kubenswrapper[4877]: I0128 16:48:57.778867 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" event={"ID":"ddb4f7da-694e-4169-b483-858c938ecaeb","Type":"ContainerStarted","Data":"9a746e8bb6be30a0317be24f735855e9ce881e4e8f2866ec78859a15778f3cee"}
Jan 28 16:48:57 crc kubenswrapper[4877]: I0128 16:48:57.804108 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" podStartSLOduration=4.078320968 podStartE2EDuration="7.804079332s" podCreationTimestamp="2026-01-28 16:48:50 +0000 UTC" firstStartedPulling="2026-01-28 16:48:51.707014781 +0000 UTC m=+835.265341669" lastFinishedPulling="2026-01-28 16:48:55.432773145 +0000 UTC m=+838.991100033" observedRunningTime="2026-01-28 16:48:57.801292907 +0000 UTC m=+841.359619785" watchObservedRunningTime="2026-01-28 16:48:57.804079332 +0000 UTC m=+841.362406220"
Jan 28 16:48:57 crc kubenswrapper[4877]: I0128 16:48:57.818679 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" podStartSLOduration=4.094158746 podStartE2EDuration="7.818652966s" podCreationTimestamp="2026-01-28 16:48:50 +0000 UTC" firstStartedPulling="2026-01-28 16:48:51.709282833 +0000 UTC m=+835.267609721" lastFinishedPulling="2026-01-28 16:48:55.433777053 +0000 UTC m=+838.992103941" observedRunningTime="2026-01-28 16:48:57.817807723 +0000 UTC m=+841.376134611" watchObservedRunningTime="2026-01-28 16:48:57.818652966 +0000 UTC m=+841.376979844"
Jan 28 16:48:58 crc kubenswrapper[4877]: I0128 16:48:58.790415 4877 generic.go:334] "Generic (PLEG): container finished" podID="ddb4f7da-694e-4169-b483-858c938ecaeb" containerID="9a746e8bb6be30a0317be24f735855e9ce881e4e8f2866ec78859a15778f3cee" exitCode=0
Jan 28 16:48:58 crc kubenswrapper[4877]: I0128 16:48:58.790517 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" event={"ID":"ddb4f7da-694e-4169-b483-858c938ecaeb","Type":"ContainerDied","Data":"9a746e8bb6be30a0317be24f735855e9ce881e4e8f2866ec78859a15778f3cee"}
Jan 28 16:48:58 crc kubenswrapper[4877]: I0128 16:48:58.794014 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-btcjg" event={"ID":"f1ab6861-bb44-4191-bd90-93a9899e5ae3","Type":"ContainerStarted","Data":"cc2e83d8beb855bf6adb82bb19c8427ff407e9b014e8c3a442f5f0e3721c87be"}
Jan 28 16:48:58 crc kubenswrapper[4877]: I0128 16:48:58.798083 4877 generic.go:334] "Generic (PLEG): container finished" podID="00066e1b-d4d0-4b82-ae77-5ec880ed600d" containerID="770c1dfab2fe6caf784ca6fc41accb5eaad477f6b0cf62f8c41502df350128b1" exitCode=0
Jan 28 16:48:58 crc kubenswrapper[4877]: I0128 16:48:58.798141 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" event={"ID":"00066e1b-d4d0-4b82-ae77-5ec880ed600d","Type":"ContainerDied","Data":"770c1dfab2fe6caf784ca6fc41accb5eaad477f6b0cf62f8c41502df350128b1"}
Jan 28 16:48:59 crc kubenswrapper[4877]: I0128 16:48:59.808888 4877 generic.go:334] "Generic (PLEG): container finished" podID="f1ab6861-bb44-4191-bd90-93a9899e5ae3" containerID="cc2e83d8beb855bf6adb82bb19c8427ff407e9b014e8c3a442f5f0e3721c87be" exitCode=0
Jan 28 16:48:59 crc kubenswrapper[4877]: I0128 16:48:59.809163 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-btcjg" event={"ID":"f1ab6861-bb44-4191-bd90-93a9899e5ae3","Type":"ContainerDied","Data":"cc2e83d8beb855bf6adb82bb19c8427ff407e9b014e8c3a442f5f0e3721c87be"}
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.130774 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r"
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.137317 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4"
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.215344 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ddb4f7da-694e-4169-b483-858c938ecaeb-util\") pod \"ddb4f7da-694e-4169-b483-858c938ecaeb\" (UID: \"ddb4f7da-694e-4169-b483-858c938ecaeb\") "
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.216281 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ddb4f7da-694e-4169-b483-858c938ecaeb-bundle\") pod \"ddb4f7da-694e-4169-b483-858c938ecaeb\" (UID: \"ddb4f7da-694e-4169-b483-858c938ecaeb\") "
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.216425 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2csgx\" (UniqueName: \"kubernetes.io/projected/ddb4f7da-694e-4169-b483-858c938ecaeb-kube-api-access-2csgx\") pod \"ddb4f7da-694e-4169-b483-858c938ecaeb\" (UID: \"ddb4f7da-694e-4169-b483-858c938ecaeb\") "
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.217946 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddb4f7da-694e-4169-b483-858c938ecaeb-bundle" (OuterVolumeSpecName: "bundle") pod "ddb4f7da-694e-4169-b483-858c938ecaeb" (UID: "ddb4f7da-694e-4169-b483-858c938ecaeb"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.222824 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddb4f7da-694e-4169-b483-858c938ecaeb-kube-api-access-2csgx" (OuterVolumeSpecName: "kube-api-access-2csgx") pod "ddb4f7da-694e-4169-b483-858c938ecaeb" (UID: "ddb4f7da-694e-4169-b483-858c938ecaeb"). InnerVolumeSpecName "kube-api-access-2csgx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.226346 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddb4f7da-694e-4169-b483-858c938ecaeb-util" (OuterVolumeSpecName: "util") pod "ddb4f7da-694e-4169-b483-858c938ecaeb" (UID: "ddb4f7da-694e-4169-b483-858c938ecaeb"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.317945 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/00066e1b-d4d0-4b82-ae77-5ec880ed600d-util\") pod \"00066e1b-d4d0-4b82-ae77-5ec880ed600d\" (UID: \"00066e1b-d4d0-4b82-ae77-5ec880ed600d\") "
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.318144 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fg7g\" (UniqueName: \"kubernetes.io/projected/00066e1b-d4d0-4b82-ae77-5ec880ed600d-kube-api-access-2fg7g\") pod \"00066e1b-d4d0-4b82-ae77-5ec880ed600d\" (UID: \"00066e1b-d4d0-4b82-ae77-5ec880ed600d\") "
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.318284 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/00066e1b-d4d0-4b82-ae77-5ec880ed600d-bundle\") pod \"00066e1b-d4d0-4b82-ae77-5ec880ed600d\" (UID: \"00066e1b-d4d0-4b82-ae77-5ec880ed600d\") "
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.318840 4877 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ddb4f7da-694e-4169-b483-858c938ecaeb-util\") on node \"crc\" DevicePath \"\""
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.318881 4877 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ddb4f7da-694e-4169-b483-858c938ecaeb-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.318934 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2csgx\" (UniqueName: \"kubernetes.io/projected/ddb4f7da-694e-4169-b483-858c938ecaeb-kube-api-access-2csgx\") on node \"crc\" DevicePath \"\""
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.319742 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00066e1b-d4d0-4b82-ae77-5ec880ed600d-bundle" (OuterVolumeSpecName: "bundle") pod "00066e1b-d4d0-4b82-ae77-5ec880ed600d" (UID: "00066e1b-d4d0-4b82-ae77-5ec880ed600d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.322182 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00066e1b-d4d0-4b82-ae77-5ec880ed600d-kube-api-access-2fg7g" (OuterVolumeSpecName: "kube-api-access-2fg7g") pod "00066e1b-d4d0-4b82-ae77-5ec880ed600d" (UID: "00066e1b-d4d0-4b82-ae77-5ec880ed600d"). InnerVolumeSpecName "kube-api-access-2fg7g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.333469 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00066e1b-d4d0-4b82-ae77-5ec880ed600d-util" (OuterVolumeSpecName: "util") pod "00066e1b-d4d0-4b82-ae77-5ec880ed600d" (UID: "00066e1b-d4d0-4b82-ae77-5ec880ed600d"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.420406 4877 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/00066e1b-d4d0-4b82-ae77-5ec880ed600d-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.420435 4877 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/00066e1b-d4d0-4b82-ae77-5ec880ed600d-util\") on node \"crc\" DevicePath \"\""
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.420444 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fg7g\" (UniqueName: \"kubernetes.io/projected/00066e1b-d4d0-4b82-ae77-5ec880ed600d-kube-api-access-2fg7g\") on node \"crc\" DevicePath \"\""
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.821406 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4" event={"ID":"00066e1b-d4d0-4b82-ae77-5ec880ed600d","Type":"ContainerDied","Data":"6eb4403d573f8f560d714da49233fd03b17e64c7a0a65e480a71935e7ca9dfd7"}
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.821460 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6eb4403d573f8f560d714da49233fd03b17e64c7a0a65e480a71935e7ca9dfd7"
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.821632 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/40d905839fa7263f1f473fab6e11a9af2a700db4f753f3af512410360bfgpn4"
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.823971 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r" event={"ID":"ddb4f7da-694e-4169-b483-858c938ecaeb","Type":"ContainerDied","Data":"3bc53ca4d11038e781cd387b3cd2156f408f11af9f428c08d9b3d301aed9ae37"}
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.824037 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3bc53ca4d11038e781cd387b3cd2156f408f11af9f428c08d9b3d301aed9ae37"
Jan 28 16:49:00 crc kubenswrapper[4877]: I0128 16:49:00.824205 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/19f7b28a9b43ae652fc2e0b84ee4ec326dbd0a997d417d0c402b7249a28ks2r"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.037252 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"]
Jan 28 16:49:07 crc kubenswrapper[4877]: E0128 16:49:07.038244 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00066e1b-d4d0-4b82-ae77-5ec880ed600d" containerName="pull"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.038262 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="00066e1b-d4d0-4b82-ae77-5ec880ed600d" containerName="pull"
Jan 28 16:49:07 crc kubenswrapper[4877]: E0128 16:49:07.038277 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00066e1b-d4d0-4b82-ae77-5ec880ed600d" containerName="util"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.038286 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="00066e1b-d4d0-4b82-ae77-5ec880ed600d" containerName="util"
Jan 28 16:49:07 crc kubenswrapper[4877]: E0128 16:49:07.038305 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00066e1b-d4d0-4b82-ae77-5ec880ed600d" containerName="extract"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.038315 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="00066e1b-d4d0-4b82-ae77-5ec880ed600d" containerName="extract"
Jan 28 16:49:07 crc kubenswrapper[4877]: E0128 16:49:07.038336 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddb4f7da-694e-4169-b483-858c938ecaeb" containerName="extract"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.038344 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddb4f7da-694e-4169-b483-858c938ecaeb" containerName="extract"
Jan 28 16:49:07 crc kubenswrapper[4877]: E0128 16:49:07.038365 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddb4f7da-694e-4169-b483-858c938ecaeb" containerName="util"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.038375 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddb4f7da-694e-4169-b483-858c938ecaeb" containerName="util"
Jan 28 16:49:07 crc kubenswrapper[4877]: E0128 16:49:07.038391 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddb4f7da-694e-4169-b483-858c938ecaeb" containerName="pull"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.038399 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddb4f7da-694e-4169-b483-858c938ecaeb" containerName="pull"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.038590 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="00066e1b-d4d0-4b82-ae77-5ec880ed600d" containerName="extract"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.038620 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddb4f7da-694e-4169-b483-858c938ecaeb" containerName="extract"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.039659 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.042068 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.042280 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.042892 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-ljfh6"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.045518 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.045710 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.047294 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.059349 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"]
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.153134 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/930eb8ce-5fe9-4e7d-a700-d52614d5915a-webhook-cert\") pod \"loki-operator-controller-manager-dd586d7ff-8kr7k\" (UID: \"930eb8ce-5fe9-4e7d-a700-d52614d5915a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.153200 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/930eb8ce-5fe9-4e7d-a700-d52614d5915a-manager-config\") pod \"loki-operator-controller-manager-dd586d7ff-8kr7k\" (UID: \"930eb8ce-5fe9-4e7d-a700-d52614d5915a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.153234 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/930eb8ce-5fe9-4e7d-a700-d52614d5915a-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-dd586d7ff-8kr7k\" (UID: \"930eb8ce-5fe9-4e7d-a700-d52614d5915a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.153309 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/930eb8ce-5fe9-4e7d-a700-d52614d5915a-apiservice-cert\") pod \"loki-operator-controller-manager-dd586d7ff-8kr7k\" (UID: \"930eb8ce-5fe9-4e7d-a700-d52614d5915a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.153328 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dr9x2\" (UniqueName: \"kubernetes.io/projected/930eb8ce-5fe9-4e7d-a700-d52614d5915a-kube-api-access-dr9x2\") pod \"loki-operator-controller-manager-dd586d7ff-8kr7k\" (UID: \"930eb8ce-5fe9-4e7d-a700-d52614d5915a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.255522 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/930eb8ce-5fe9-4e7d-a700-d52614d5915a-webhook-cert\") pod \"loki-operator-controller-manager-dd586d7ff-8kr7k\" (UID: \"930eb8ce-5fe9-4e7d-a700-d52614d5915a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.255587 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/930eb8ce-5fe9-4e7d-a700-d52614d5915a-manager-config\") pod \"loki-operator-controller-manager-dd586d7ff-8kr7k\" (UID: \"930eb8ce-5fe9-4e7d-a700-d52614d5915a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.255622 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/930eb8ce-5fe9-4e7d-a700-d52614d5915a-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-dd586d7ff-8kr7k\" (UID: \"930eb8ce-5fe9-4e7d-a700-d52614d5915a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.255663 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/930eb8ce-5fe9-4e7d-a700-d52614d5915a-apiservice-cert\") pod \"loki-operator-controller-manager-dd586d7ff-8kr7k\" (UID: \"930eb8ce-5fe9-4e7d-a700-d52614d5915a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.255683 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dr9x2\" (UniqueName: \"kubernetes.io/projected/930eb8ce-5fe9-4e7d-a700-d52614d5915a-kube-api-access-dr9x2\") pod \"loki-operator-controller-manager-dd586d7ff-8kr7k\" (UID: \"930eb8ce-5fe9-4e7d-a700-d52614d5915a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.257079 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/930eb8ce-5fe9-4e7d-a700-d52614d5915a-manager-config\") pod \"loki-operator-controller-manager-dd586d7ff-8kr7k\" (UID: \"930eb8ce-5fe9-4e7d-a700-d52614d5915a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.263868 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/930eb8ce-5fe9-4e7d-a700-d52614d5915a-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-dd586d7ff-8kr7k\" (UID: \"930eb8ce-5fe9-4e7d-a700-d52614d5915a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.281648 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/930eb8ce-5fe9-4e7d-a700-d52614d5915a-apiservice-cert\") pod \"loki-operator-controller-manager-dd586d7ff-8kr7k\" (UID: \"930eb8ce-5fe9-4e7d-a700-d52614d5915a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.283268 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/930eb8ce-5fe9-4e7d-a700-d52614d5915a-webhook-cert\") pod \"loki-operator-controller-manager-dd586d7ff-8kr7k\" (UID: \"930eb8ce-5fe9-4e7d-a700-d52614d5915a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.308164 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dr9x2\" (UniqueName: \"kubernetes.io/projected/930eb8ce-5fe9-4e7d-a700-d52614d5915a-kube-api-access-dr9x2\") pod \"loki-operator-controller-manager-dd586d7ff-8kr7k\" (UID: \"930eb8ce-5fe9-4e7d-a700-d52614d5915a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.362644 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.813388 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"]
Jan 28 16:49:07 crc kubenswrapper[4877]: I0128 16:49:07.889527 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k" event={"ID":"930eb8ce-5fe9-4e7d-a700-d52614d5915a","Type":"ContainerStarted","Data":"501bd26bd0fb22fef0219a956806172fc6f1af86dc03879632d4a40b2bd93a0d"}
Jan 28 16:49:11 crc kubenswrapper[4877]: I0128 16:49:11.919071 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-btcjg" event={"ID":"f1ab6861-bb44-4191-bd90-93a9899e5ae3","Type":"ContainerStarted","Data":"dbbdbce198c6c23b44a9043a84f2ee130c6fd0f6d0f3c826eaffa067303fb798"}
Jan 28 16:49:11 crc kubenswrapper[4877]: I0128 16:49:11.942321 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-btcjg" podStartSLOduration=3.111435741 podStartE2EDuration="17.942294464s" podCreationTimestamp="2026-01-28 16:48:54 +0000 UTC" firstStartedPulling="2026-01-28 16:48:56.755154886 +0000 UTC m=+840.313481774" lastFinishedPulling="2026-01-28 16:49:11.586013599 +0000 UTC m=+855.144340497" observedRunningTime="2026-01-28 16:49:11.938142951 +0000 UTC m=+855.496469849" watchObservedRunningTime="2026-01-28 16:49:11.942294464 +0000 UTC m=+855.500621352"
Jan 28 16:49:12 crc kubenswrapper[4877]: I0128 16:49:12.475240 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/cluster-logging-operator-79cf69ddc8-t57v9"]
Jan 28 16:49:12 crc kubenswrapper[4877]: I0128 16:49:12.476183 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-t57v9"
Jan 28 16:49:12 crc kubenswrapper[4877]: I0128 16:49:12.478015 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"openshift-service-ca.crt"
Jan 28 16:49:12 crc kubenswrapper[4877]: I0128 16:49:12.478334 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"cluster-logging-operator-dockercfg-tjb8j"
Jan 28 16:49:12 crc kubenswrapper[4877]: I0128 16:49:12.482518 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"kube-root-ca.crt"
Jan 28 16:49:12 crc kubenswrapper[4877]: I0128 16:49:12.501940 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-79cf69ddc8-t57v9"]
Jan 28 16:49:12 crc kubenswrapper[4877]: I0128 16:49:12.545858 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sh5gp\" (UniqueName: \"kubernetes.io/projected/126bdf3f-5cfc-4ce9-8d40-1478ff0dda20-kube-api-access-sh5gp\") pod \"cluster-logging-operator-79cf69ddc8-t57v9\" (UID: \"126bdf3f-5cfc-4ce9-8d40-1478ff0dda20\") " pod="openshift-logging/cluster-logging-operator-79cf69ddc8-t57v9"
Jan 28 16:49:12 crc kubenswrapper[4877]: I0128 16:49:12.647582 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sh5gp\" (UniqueName: \"kubernetes.io/projected/126bdf3f-5cfc-4ce9-8d40-1478ff0dda20-kube-api-access-sh5gp\") pod \"cluster-logging-operator-79cf69ddc8-t57v9\" (UID: \"126bdf3f-5cfc-4ce9-8d40-1478ff0dda20\") " pod="openshift-logging/cluster-logging-operator-79cf69ddc8-t57v9"
Jan 28 16:49:12 crc kubenswrapper[4877]: I0128 16:49:12.670318 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sh5gp\" (UniqueName: \"kubernetes.io/projected/126bdf3f-5cfc-4ce9-8d40-1478ff0dda20-kube-api-access-sh5gp\") pod \"cluster-logging-operator-79cf69ddc8-t57v9\" (UID: \"126bdf3f-5cfc-4ce9-8d40-1478ff0dda20\") " pod="openshift-logging/cluster-logging-operator-79cf69ddc8-t57v9"
Jan 28 16:49:12 crc kubenswrapper[4877]: I0128 16:49:12.791650 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-t57v9"
Jan 28 16:49:13 crc kubenswrapper[4877]: I0128 16:49:13.091328 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-79cf69ddc8-t57v9"]
Jan 28 16:49:13 crc kubenswrapper[4877]: I0128 16:49:13.999531 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-t57v9" event={"ID":"126bdf3f-5cfc-4ce9-8d40-1478ff0dda20","Type":"ContainerStarted","Data":"ee85f26b5ad928e0cb7806d23e6acfdab06059fedceefb50c8d05cdbfb4cb08c"}
Jan 28 16:49:14 crc kubenswrapper[4877]: I0128 16:49:14.705946 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-btcjg"
Jan 28 16:49:14 crc kubenswrapper[4877]: I0128 16:49:14.706380 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-btcjg"
Jan 28 16:49:15 crc kubenswrapper[4877]: I0128 16:49:15.762633 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-btcjg" podUID="f1ab6861-bb44-4191-bd90-93a9899e5ae3" containerName="registry-server" probeResult="failure" output=<
Jan 28 16:49:15 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 16:49:15 crc kubenswrapper[4877]: >
Jan 28 16:49:20 crc kubenswrapper[4877]: I0128 16:49:20.070434 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k" event={"ID":"930eb8ce-5fe9-4e7d-a700-d52614d5915a","Type":"ContainerStarted","Data":"9bd2b8d1a9c4860cd6b0b2819cc1af199327e0e6ba0f5b27c8291d1fb78e408e"}
Jan 28 16:49:22 crc kubenswrapper[4877]: I0128 16:49:22.106059 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-t57v9" event={"ID":"126bdf3f-5cfc-4ce9-8d40-1478ff0dda20","Type":"ContainerStarted","Data":"bd074c0e16c57ade94fa0809310177d0a9f083ec86e049318fbcd67e9002150d"}
Jan 28 16:49:22 crc kubenswrapper[4877]: I0128 16:49:22.128657 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/cluster-logging-operator-79cf69ddc8-t57v9" podStartSLOduration=1.624200737 podStartE2EDuration="10.128633319s" podCreationTimestamp="2026-01-28 16:49:12 +0000 UTC" firstStartedPulling="2026-01-28 16:49:13.112655013 +0000 UTC m=+856.670981901" lastFinishedPulling="2026-01-28 16:49:21.617087595 +0000 UTC m=+865.175414483" observedRunningTime="2026-01-28 16:49:22.124540609 +0000 UTC m=+865.682867507" watchObservedRunningTime="2026-01-28 16:49:22.128633319 +0000 UTC m=+865.686960207"
Jan 28 16:49:24 crc kubenswrapper[4877]: I0128 16:49:24.768662 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-btcjg"
Jan 28 16:49:24 crc kubenswrapper[4877]: I0128 16:49:24.815207 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-btcjg"
Jan 28 16:49:26 crc kubenswrapper[4877]: I0128 16:49:26.559978 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-btcjg"]
Jan 28 16:49:26 crc kubenswrapper[4877]: I0128 16:49:26.562043 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-btcjg" podUID="f1ab6861-bb44-4191-bd90-93a9899e5ae3" containerName="registry-server" containerID="cri-o://dbbdbce198c6c23b44a9043a84f2ee130c6fd0f6d0f3c826eaffa067303fb798" gracePeriod=2
Jan 28 16:49:27 crc kubenswrapper[4877]: I0128 16:49:27.146630 4877 generic.go:334] "Generic (PLEG): container finished" podID="f1ab6861-bb44-4191-bd90-93a9899e5ae3" containerID="dbbdbce198c6c23b44a9043a84f2ee130c6fd0f6d0f3c826eaffa067303fb798" exitCode=0
Jan 28 16:49:27 crc kubenswrapper[4877]: I0128 16:49:27.146690 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-btcjg" event={"ID":"f1ab6861-bb44-4191-bd90-93a9899e5ae3","Type":"ContainerDied","Data":"dbbdbce198c6c23b44a9043a84f2ee130c6fd0f6d0f3c826eaffa067303fb798"}
Jan 28 16:49:27 crc kubenswrapper[4877]: I0128 16:49:27.721033 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-btcjg"
Jan 28 16:49:27 crc kubenswrapper[4877]: I0128 16:49:27.840804 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1ab6861-bb44-4191-bd90-93a9899e5ae3-catalog-content\") pod \"f1ab6861-bb44-4191-bd90-93a9899e5ae3\" (UID: \"f1ab6861-bb44-4191-bd90-93a9899e5ae3\") "
Jan 28 16:49:27 crc kubenswrapper[4877]: I0128 16:49:27.840919 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1ab6861-bb44-4191-bd90-93a9899e5ae3-utilities\") pod \"f1ab6861-bb44-4191-bd90-93a9899e5ae3\" (UID: \"f1ab6861-bb44-4191-bd90-93a9899e5ae3\") "
Jan 28 16:49:27 crc kubenswrapper[4877]: I0128 16:49:27.841031 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c254m\" (UniqueName: \"kubernetes.io/projected/f1ab6861-bb44-4191-bd90-93a9899e5ae3-kube-api-access-c254m\") pod \"f1ab6861-bb44-4191-bd90-93a9899e5ae3\" (UID: \"f1ab6861-bb44-4191-bd90-93a9899e5ae3\") "
Jan 28 16:49:27 crc kubenswrapper[4877]: I0128 16:49:27.843926 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1ab6861-bb44-4191-bd90-93a9899e5ae3-utilities" (OuterVolumeSpecName: "utilities") pod "f1ab6861-bb44-4191-bd90-93a9899e5ae3" (UID: "f1ab6861-bb44-4191-bd90-93a9899e5ae3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:49:27 crc kubenswrapper[4877]: I0128 16:49:27.856916 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1ab6861-bb44-4191-bd90-93a9899e5ae3-kube-api-access-c254m" (OuterVolumeSpecName: "kube-api-access-c254m") pod "f1ab6861-bb44-4191-bd90-93a9899e5ae3" (UID: "f1ab6861-bb44-4191-bd90-93a9899e5ae3"). InnerVolumeSpecName "kube-api-access-c254m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:49:27 crc kubenswrapper[4877]: I0128 16:49:27.943765 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1ab6861-bb44-4191-bd90-93a9899e5ae3-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 16:49:27 crc kubenswrapper[4877]: I0128 16:49:27.943795 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c254m\" (UniqueName: \"kubernetes.io/projected/f1ab6861-bb44-4191-bd90-93a9899e5ae3-kube-api-access-c254m\") on node \"crc\" DevicePath \"\""
Jan 28 16:49:27 crc kubenswrapper[4877]: I0128 16:49:27.973720 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1ab6861-bb44-4191-bd90-93a9899e5ae3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f1ab6861-bb44-4191-bd90-93a9899e5ae3" (UID: "f1ab6861-bb44-4191-bd90-93a9899e5ae3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:49:28 crc kubenswrapper[4877]: I0128 16:49:28.044912 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1ab6861-bb44-4191-bd90-93a9899e5ae3-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 16:49:28 crc kubenswrapper[4877]: I0128 16:49:28.156365 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k" event={"ID":"930eb8ce-5fe9-4e7d-a700-d52614d5915a","Type":"ContainerStarted","Data":"4e5696581cc2d279ba17e518233c42fcff68f861ca7c4b63cc2603ba25b9cf49"}
Jan 28 16:49:28 crc kubenswrapper[4877]: I0128 16:49:28.156653 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:28 crc kubenswrapper[4877]: I0128 16:49:28.159699 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k"
Jan 28 16:49:28 crc kubenswrapper[4877]: I0128 16:49:28.161383 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-btcjg" event={"ID":"f1ab6861-bb44-4191-bd90-93a9899e5ae3","Type":"ContainerDied","Data":"d0d81ba91779bfbf70056acdb2b0e31ce8a6eca9dd381bb6a0d7f6fbc62d9acc"}
Jan 28 16:49:28 crc kubenswrapper[4877]: I0128 16:49:28.161432 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-btcjg"
Jan 28 16:49:28 crc kubenswrapper[4877]: I0128 16:49:28.161450 4877 scope.go:117] "RemoveContainer" containerID="dbbdbce198c6c23b44a9043a84f2ee130c6fd0f6d0f3c826eaffa067303fb798"
Jan 28 16:49:28 crc kubenswrapper[4877]: I0128 16:49:28.186441 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k" podStartSLOduration=1.505973047 podStartE2EDuration="21.186409447s" podCreationTimestamp="2026-01-28 16:49:07 +0000 UTC" firstStartedPulling="2026-01-28 16:49:07.824453727 +0000 UTC m=+851.382780625" lastFinishedPulling="2026-01-28 16:49:27.504890137 +0000 UTC m=+871.063217025" observedRunningTime="2026-01-28 16:49:28.181586637 +0000 UTC m=+871.739913545" watchObservedRunningTime="2026-01-28 16:49:28.186409447 +0000 UTC m=+871.744736335"
Jan 28 16:49:28 crc kubenswrapper[4877]: I0128 16:49:28.189208 4877 scope.go:117] "RemoveContainer" containerID="cc2e83d8beb855bf6adb82bb19c8427ff407e9b014e8c3a442f5f0e3721c87be"
Jan 28 16:49:28 crc kubenswrapper[4877]: I0128 16:49:28.226995 4877 scope.go:117] "RemoveContainer" containerID="17af8ef5438f1c9037d50f5f594e6250f22808b482ca729ee4acd102e3958bf9"
Jan 28 16:49:28 crc kubenswrapper[4877]: I0128 16:49:28.229757 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-btcjg"]
Jan 28 16:49:28 crc kubenswrapper[4877]: I0128 16:49:28.238057 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-btcjg"]
Jan 28 16:49:29 crc kubenswrapper[4877]: I0128 16:49:29.340384 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1ab6861-bb44-4191-bd90-93a9899e5ae3" path="/var/lib/kubelet/pods/f1ab6861-bb44-4191-bd90-93a9899e5ae3/volumes"
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.552127 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["minio-dev/minio"]
Jan 28 16:49:33 crc kubenswrapper[4877]: E0128 16:49:33.552878 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1ab6861-bb44-4191-bd90-93a9899e5ae3" containerName="extract-content"
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.552900 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1ab6861-bb44-4191-bd90-93a9899e5ae3" containerName="extract-content"
Jan 28 16:49:33 crc kubenswrapper[4877]: E0128 16:49:33.552926 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1ab6861-bb44-4191-bd90-93a9899e5ae3" containerName="registry-server"
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.552934 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1ab6861-bb44-4191-bd90-93a9899e5ae3" containerName="registry-server"
Jan 28 16:49:33 crc kubenswrapper[4877]: E0128 16:49:33.552950 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1ab6861-bb44-4191-bd90-93a9899e5ae3" containerName="extract-utilities"
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.552959 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1ab6861-bb44-4191-bd90-93a9899e5ae3" containerName="extract-utilities"
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.553128 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1ab6861-bb44-4191-bd90-93a9899e5ae3" containerName="registry-server"
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.553849 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio"
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.556167 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt"
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.557890 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt"
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.574258 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"]
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.646287 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqq2j\" (UniqueName: \"kubernetes.io/projected/13b5057d-125f-4240-b59a-91671be1812c-kube-api-access-tqq2j\") pod \"minio\" (UID: \"13b5057d-125f-4240-b59a-91671be1812c\") " pod="minio-dev/minio"
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.646419 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-3fdae5ac-55bf-4341-9f3c-218ecf66f208\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3fdae5ac-55bf-4341-9f3c-218ecf66f208\") pod \"minio\" (UID: \"13b5057d-125f-4240-b59a-91671be1812c\") " pod="minio-dev/minio"
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.748493 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-3fdae5ac-55bf-4341-9f3c-218ecf66f208\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3fdae5ac-55bf-4341-9f3c-218ecf66f208\") pod \"minio\" (UID: \"13b5057d-125f-4240-b59a-91671be1812c\") " pod="minio-dev/minio"
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.748626 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqq2j\" (UniqueName: \"kubernetes.io/projected/13b5057d-125f-4240-b59a-91671be1812c-kube-api-access-tqq2j\") pod \"minio\" (UID: \"13b5057d-125f-4240-b59a-91671be1812c\") " pod="minio-dev/minio"
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.755208 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.755257 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-3fdae5ac-55bf-4341-9f3c-218ecf66f208\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3fdae5ac-55bf-4341-9f3c-218ecf66f208\") pod \"minio\" (UID: \"13b5057d-125f-4240-b59a-91671be1812c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/590465dc50a08d371f5a38e08b2541ce873e37bdaa0ab00bfd2337a91c9df5d0/globalmount\"" pod="minio-dev/minio"
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.780625 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqq2j\" (UniqueName: \"kubernetes.io/projected/13b5057d-125f-4240-b59a-91671be1812c-kube-api-access-tqq2j\") pod \"minio\" (UID: \"13b5057d-125f-4240-b59a-91671be1812c\") " pod="minio-dev/minio"
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.790786 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-3fdae5ac-55bf-4341-9f3c-218ecf66f208\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3fdae5ac-55bf-4341-9f3c-218ecf66f208\") pod \"minio\" (UID: \"13b5057d-125f-4240-b59a-91671be1812c\") " pod="minio-dev/minio"
Jan 28 16:49:33 crc kubenswrapper[4877]: I0128 16:49:33.896230 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio"
Jan 28 16:49:34 crc kubenswrapper[4877]: I0128 16:49:34.365909 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"]
Jan 28 16:49:35 crc kubenswrapper[4877]: I0128 16:49:35.239868 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"13b5057d-125f-4240-b59a-91671be1812c","Type":"ContainerStarted","Data":"07f6d7221538411a11336ce9ed392cb314de5f4f34ad892a822c590ad5e711b4"}
Jan 28 16:49:39 crc kubenswrapper[4877]: I0128 16:49:39.275317 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"13b5057d-125f-4240-b59a-91671be1812c","Type":"ContainerStarted","Data":"8f9f61ca27bba3d4aae4240804379c113acf9fc60de0fef41d339519922d5e82"}
Jan 28 16:49:39 crc kubenswrapper[4877]: I0128 16:49:39.298720 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="minio-dev/minio" podStartSLOduration=5.424750441 podStartE2EDuration="9.298678552s" podCreationTimestamp="2026-01-28 16:49:30 +0000 UTC" firstStartedPulling="2026-01-28 16:49:34.37087135 +0000 UTC m=+877.929198239" lastFinishedPulling="2026-01-28 16:49:38.244799462 +0000 UTC m=+881.803126350" observedRunningTime="2026-01-28 16:49:39.292091883 +0000 UTC m=+882.850418781" watchObservedRunningTime="2026-01-28 16:49:39.298678552 +0000 UTC m=+882.857005470"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.667059 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"]
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.668541 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.672741 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.674361 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-9prqd"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.674665 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.674860 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.674930 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.697240 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"]
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.835738 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/8edb732d-043a-4fbf-b0b1-da98fdaa9a84-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-5f678c8dd6-pl69c\" (UID: \"8edb732d-043a-4fbf-b0b1-da98fdaa9a84\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.835838 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8edb732d-043a-4fbf-b0b1-da98fdaa9a84-config\") pod \"logging-loki-distributor-5f678c8dd6-pl69c\" (UID: \"8edb732d-043a-4fbf-b0b1-da98fdaa9a84\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.836091 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsfjb\" (UniqueName: \"kubernetes.io/projected/8edb732d-043a-4fbf-b0b1-da98fdaa9a84-kube-api-access-gsfjb\") pod \"logging-loki-distributor-5f678c8dd6-pl69c\" (UID: \"8edb732d-043a-4fbf-b0b1-da98fdaa9a84\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.836222 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/8edb732d-043a-4fbf-b0b1-da98fdaa9a84-logging-loki-distributor-http\") pod \"logging-loki-distributor-5f678c8dd6-pl69c\" (UID: \"8edb732d-043a-4fbf-b0b1-da98fdaa9a84\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.836300 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8edb732d-043a-4fbf-b0b1-da98fdaa9a84-logging-loki-ca-bundle\") pod \"logging-loki-distributor-5f678c8dd6-pl69c\" (UID: \"8edb732d-043a-4fbf-b0b1-da98fdaa9a84\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.871436 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-querier-76788598db-8zdq7"]
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.872749 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.875302 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.876758 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.877003 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.897056 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-76788598db-8zdq7"]
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.937933 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8edb732d-043a-4fbf-b0b1-da98fdaa9a84-logging-loki-ca-bundle\") pod \"logging-loki-distributor-5f678c8dd6-pl69c\" (UID: \"8edb732d-043a-4fbf-b0b1-da98fdaa9a84\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.938037 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/8edb732d-043a-4fbf-b0b1-da98fdaa9a84-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-5f678c8dd6-pl69c\" (UID: \"8edb732d-043a-4fbf-b0b1-da98fdaa9a84\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.938079 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8edb732d-043a-4fbf-b0b1-da98fdaa9a84-config\") pod \"logging-loki-distributor-5f678c8dd6-pl69c\" (UID: \"8edb732d-043a-4fbf-b0b1-da98fdaa9a84\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.938112 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsfjb\" (UniqueName: \"kubernetes.io/projected/8edb732d-043a-4fbf-b0b1-da98fdaa9a84-kube-api-access-gsfjb\") pod \"logging-loki-distributor-5f678c8dd6-pl69c\" (UID: \"8edb732d-043a-4fbf-b0b1-da98fdaa9a84\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.938141 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/8edb732d-043a-4fbf-b0b1-da98fdaa9a84-logging-loki-distributor-http\") pod \"logging-loki-distributor-5f678c8dd6-pl69c\" (UID: \"8edb732d-043a-4fbf-b0b1-da98fdaa9a84\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.939017 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8edb732d-043a-4fbf-b0b1-da98fdaa9a84-logging-loki-ca-bundle\") pod \"logging-loki-distributor-5f678c8dd6-pl69c\" (UID: \"8edb732d-043a-4fbf-b0b1-da98fdaa9a84\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.939831 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8edb732d-043a-4fbf-b0b1-da98fdaa9a84-config\") pod \"logging-loki-distributor-5f678c8dd6-pl69c\" (UID: \"8edb732d-043a-4fbf-b0b1-da98fdaa9a84\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.948666 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/8edb732d-043a-4fbf-b0b1-da98fdaa9a84-logging-loki-distributor-http\") pod \"logging-loki-distributor-5f678c8dd6-pl69c\" (UID: \"8edb732d-043a-4fbf-b0b1-da98fdaa9a84\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.952219 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/8edb732d-043a-4fbf-b0b1-da98fdaa9a84-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-5f678c8dd6-pl69c\" (UID: \"8edb732d-043a-4fbf-b0b1-da98fdaa9a84\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.972107 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsfjb\" (UniqueName: \"kubernetes.io/projected/8edb732d-043a-4fbf-b0b1-da98fdaa9a84-kube-api-access-gsfjb\") pod \"logging-loki-distributor-5f678c8dd6-pl69c\" (UID: \"8edb732d-043a-4fbf-b0b1-da98fdaa9a84\") " pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.984080 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4"]
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.985005 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.987599 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.987936 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http"
Jan 28 16:49:44 crc kubenswrapper[4877]: I0128 16:49:44.991347 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.019628 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4"]
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.040663 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dbkg\" (UniqueName: \"kubernetes.io/projected/094adba0-094e-453d-87f4-b2098f9fe680-kube-api-access-4dbkg\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.040747 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/094adba0-094e-453d-87f4-b2098f9fe680-logging-loki-ca-bundle\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.040818 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/094adba0-094e-453d-87f4-b2098f9fe680-logging-loki-querier-grpc\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.040836 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/094adba0-094e-453d-87f4-b2098f9fe680-logging-loki-querier-http\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.041006 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/094adba0-094e-453d-87f4-b2098f9fe680-logging-loki-s3\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.041171 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/094adba0-094e-453d-87f4-b2098f9fe680-config\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.123498 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2"]
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.130267 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.144066 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway-ca-bundle"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.146586 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.147423 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-http"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.148539 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-dockercfg-62q2m"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.162730 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.172438 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/094adba0-094e-453d-87f4-b2098f9fe680-logging-loki-querier-grpc\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.172557 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/094adba0-094e-453d-87f4-b2098f9fe680-logging-loki-querier-http\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.172623 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5q4wr\" (UniqueName: \"kubernetes.io/projected/ea81a55a-52f7-471b-bff8-9b49e05d459a-kube-api-access-5q4wr\") pod \"logging-loki-query-frontend-69d9546745-wkbs4\" (UID: \"ea81a55a-52f7-471b-bff8-9b49e05d459a\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.172689 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/094adba0-094e-453d-87f4-b2098f9fe680-logging-loki-s3\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.172827 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/094adba0-094e-453d-87f4-b2098f9fe680-config\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.172910 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dbkg\" (UniqueName: \"kubernetes.io/projected/094adba0-094e-453d-87f4-b2098f9fe680-kube-api-access-4dbkg\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.173034 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ea81a55a-52f7-471b-bff8-9b49e05d459a-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-69d9546745-wkbs4\" (UID: \"ea81a55a-52f7-471b-bff8-9b49e05d459a\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.173085 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/ea81a55a-52f7-471b-bff8-9b49e05d459a-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-69d9546745-wkbs4\" (UID: \"ea81a55a-52f7-471b-bff8-9b49e05d459a\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.173131 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/ea81a55a-52f7-471b-bff8-9b49e05d459a-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-69d9546745-wkbs4\" (UID: \"ea81a55a-52f7-471b-bff8-9b49e05d459a\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.173178 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/094adba0-094e-453d-87f4-b2098f9fe680-logging-loki-ca-bundle\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.173270 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea81a55a-52f7-471b-bff8-9b49e05d459a-config\") pod \"logging-loki-query-frontend-69d9546745-wkbs4\" (UID: \"ea81a55a-52f7-471b-bff8-9b49e05d459a\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.181383 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2"]
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.183669 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/094adba0-094e-453d-87f4-b2098f9fe680-logging-loki-ca-bundle\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.184418 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/094adba0-094e-453d-87f4-b2098f9fe680-config\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.196654 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/094adba0-094e-453d-87f4-b2098f9fe680-logging-loki-querier-http\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.197730 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/094adba0-094e-453d-87f4-b2098f9fe680-logging-loki-s3\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.199446 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/094adba0-094e-453d-87f4-b2098f9fe680-logging-loki-querier-grpc\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.295041 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f014bcf5-ec99-4a23-a06c-29c2e8213375-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.295109 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ea81a55a-52f7-471b-bff8-9b49e05d459a-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-69d9546745-wkbs4\" (UID: \"ea81a55a-52f7-471b-bff8-9b49e05d459a\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.295132 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/f014bcf5-ec99-4a23-a06c-29c2e8213375-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.295156 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/f014bcf5-ec99-4a23-a06c-29c2e8213375-tls-secret\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.295175 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/ea81a55a-52f7-471b-bff8-9b49e05d459a-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-69d9546745-wkbs4\" (UID: \"ea81a55a-52f7-471b-bff8-9b49e05d459a\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4"
Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.295191 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName:
\"kubernetes.io/configmap/f014bcf5-ec99-4a23-a06c-29c2e8213375-rbac\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.295214 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/ea81a55a-52f7-471b-bff8-9b49e05d459a-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-69d9546745-wkbs4\" (UID: \"ea81a55a-52f7-471b-bff8-9b49e05d459a\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.295234 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/f014bcf5-ec99-4a23-a06c-29c2e8213375-tenants\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.295265 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea81a55a-52f7-471b-bff8-9b49e05d459a-config\") pod \"logging-loki-query-frontend-69d9546745-wkbs4\" (UID: \"ea81a55a-52f7-471b-bff8-9b49e05d459a\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.295295 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5q4wr\" (UniqueName: \"kubernetes.io/projected/ea81a55a-52f7-471b-bff8-9b49e05d459a-kube-api-access-5q4wr\") pod \"logging-loki-query-frontend-69d9546745-wkbs4\" (UID: \"ea81a55a-52f7-471b-bff8-9b49e05d459a\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.295317 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtm8w\" (UniqueName: \"kubernetes.io/projected/f014bcf5-ec99-4a23-a06c-29c2e8213375-kube-api-access-qtm8w\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.295346 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/f014bcf5-ec99-4a23-a06c-29c2e8213375-lokistack-gateway\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.295363 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f014bcf5-ec99-4a23-a06c-29c2e8213375-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.303717 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-client-http" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.308365 4877 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ea81a55a-52f7-471b-bff8-9b49e05d459a-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-69d9546745-wkbs4\" (UID: \"ea81a55a-52f7-471b-bff8-9b49e05d459a\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.332670 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea81a55a-52f7-471b-bff8-9b49e05d459a-config\") pod \"logging-loki-query-frontend-69d9546745-wkbs4\" (UID: \"ea81a55a-52f7-471b-bff8-9b49e05d459a\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.364946 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/ea81a55a-52f7-471b-bff8-9b49e05d459a-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-69d9546745-wkbs4\" (UID: \"ea81a55a-52f7-471b-bff8-9b49e05d459a\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.365360 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/ea81a55a-52f7-471b-bff8-9b49e05d459a-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-69d9546745-wkbs4\" (UID: \"ea81a55a-52f7-471b-bff8-9b49e05d459a\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.366524 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dbkg\" (UniqueName: \"kubernetes.io/projected/094adba0-094e-453d-87f4-b2098f9fe680-kube-api-access-4dbkg\") pod \"logging-loki-querier-76788598db-8zdq7\" (UID: \"094adba0-094e-453d-87f4-b2098f9fe680\") " pod="openshift-logging/logging-loki-querier-76788598db-8zdq7" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.375839 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4"] Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.377234 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4"] Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.377333 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.385562 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5q4wr\" (UniqueName: \"kubernetes.io/projected/ea81a55a-52f7-471b-bff8-9b49e05d459a-kube-api-access-5q4wr\") pod \"logging-loki-query-frontend-69d9546745-wkbs4\" (UID: \"ea81a55a-52f7-471b-bff8-9b49e05d459a\") " pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.400035 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.401991 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f014bcf5-ec99-4a23-a06c-29c2e8213375-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.402096 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/d4a36cc7-b86f-45f7-a422-db7241ba6513-lokistack-gateway\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.402166 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/f014bcf5-ec99-4a23-a06c-29c2e8213375-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.402198 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/f014bcf5-ec99-4a23-a06c-29c2e8213375-tls-secret\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.402231 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/f014bcf5-ec99-4a23-a06c-29c2e8213375-rbac\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.402277 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57wpk\" (UniqueName: \"kubernetes.io/projected/d4a36cc7-b86f-45f7-a422-db7241ba6513-kube-api-access-57wpk\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.402308 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/f014bcf5-ec99-4a23-a06c-29c2e8213375-tenants\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.402370 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/d4a36cc7-b86f-45f7-a422-db7241ba6513-tls-secret\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 
16:49:45.402411 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4a36cc7-b86f-45f7-a422-db7241ba6513-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.402435 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/d4a36cc7-b86f-45f7-a422-db7241ba6513-tenants\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.402496 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/d4a36cc7-b86f-45f7-a422-db7241ba6513-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.402545 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4a36cc7-b86f-45f7-a422-db7241ba6513-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: E0128 16:49:45.402592 4877 secret.go:188] Couldn't get secret openshift-logging/logging-loki-gateway-http: secret "logging-loki-gateway-http" not found Jan 28 16:49:45 crc kubenswrapper[4877]: E0128 16:49:45.402692 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f014bcf5-ec99-4a23-a06c-29c2e8213375-tls-secret podName:f014bcf5-ec99-4a23-a06c-29c2e8213375 nodeName:}" failed. No retries permitted until 2026-01-28 16:49:45.902662128 +0000 UTC m=+889.460989086 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/f014bcf5-ec99-4a23-a06c-29c2e8213375-tls-secret") pod "logging-loki-gateway-5f6787f74d-4m9v2" (UID: "f014bcf5-ec99-4a23-a06c-29c2e8213375") : secret "logging-loki-gateway-http" not found Jan 28 16:49:45 crc kubenswrapper[4877]: E0128 16:49:45.404109 4877 configmap.go:193] Couldn't get configMap openshift-logging/logging-loki-gateway-ca-bundle: configmap "logging-loki-gateway-ca-bundle" not found Jan 28 16:49:45 crc kubenswrapper[4877]: E0128 16:49:45.404231 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f014bcf5-ec99-4a23-a06c-29c2e8213375-logging-loki-gateway-ca-bundle podName:f014bcf5-ec99-4a23-a06c-29c2e8213375 nodeName:}" failed. No retries permitted until 2026-01-28 16:49:45.90421113 +0000 UTC m=+889.462538018 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "logging-loki-gateway-ca-bundle" (UniqueName: "kubernetes.io/configmap/f014bcf5-ec99-4a23-a06c-29c2e8213375-logging-loki-gateway-ca-bundle") pod "logging-loki-gateway-5f6787f74d-4m9v2" (UID: "f014bcf5-ec99-4a23-a06c-29c2e8213375") : configmap "logging-loki-gateway-ca-bundle" not found Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.404177 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/f014bcf5-ec99-4a23-a06c-29c2e8213375-rbac\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.402605 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtm8w\" (UniqueName: \"kubernetes.io/projected/f014bcf5-ec99-4a23-a06c-29c2e8213375-kube-api-access-qtm8w\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.405000 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/d4a36cc7-b86f-45f7-a422-db7241ba6513-rbac\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.405095 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/f014bcf5-ec99-4a23-a06c-29c2e8213375-lokistack-gateway\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.405165 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f014bcf5-ec99-4a23-a06c-29c2e8213375-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.406076 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f014bcf5-ec99-4a23-a06c-29c2e8213375-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.406872 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/f014bcf5-ec99-4a23-a06c-29c2e8213375-lokistack-gateway\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.413053 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/f014bcf5-ec99-4a23-a06c-29c2e8213375-logging-loki-gateway-client-http\") pod 
\"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.427249 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/f014bcf5-ec99-4a23-a06c-29c2e8213375-tenants\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.438151 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtm8w\" (UniqueName: \"kubernetes.io/projected/f014bcf5-ec99-4a23-a06c-29c2e8213375-kube-api-access-qtm8w\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.501070 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-76788598db-8zdq7" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.507090 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/d4a36cc7-b86f-45f7-a422-db7241ba6513-tls-secret\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.507158 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4a36cc7-b86f-45f7-a422-db7241ba6513-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.507194 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/d4a36cc7-b86f-45f7-a422-db7241ba6513-tenants\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.507224 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/d4a36cc7-b86f-45f7-a422-db7241ba6513-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.507266 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4a36cc7-b86f-45f7-a422-db7241ba6513-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.507300 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/d4a36cc7-b86f-45f7-a422-db7241ba6513-rbac\") pod 
\"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.507385 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/d4a36cc7-b86f-45f7-a422-db7241ba6513-lokistack-gateway\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.507453 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57wpk\" (UniqueName: \"kubernetes.io/projected/d4a36cc7-b86f-45f7-a422-db7241ba6513-kube-api-access-57wpk\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: E0128 16:49:45.509154 4877 secret.go:188] Couldn't get secret openshift-logging/logging-loki-gateway-http: secret "logging-loki-gateway-http" not found Jan 28 16:49:45 crc kubenswrapper[4877]: E0128 16:49:45.509263 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d4a36cc7-b86f-45f7-a422-db7241ba6513-tls-secret podName:d4a36cc7-b86f-45f7-a422-db7241ba6513 nodeName:}" failed. No retries permitted until 2026-01-28 16:49:46.00923881 +0000 UTC m=+889.567565698 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/d4a36cc7-b86f-45f7-a422-db7241ba6513-tls-secret") pod "logging-loki-gateway-5f6787f74d-h6fl4" (UID: "d4a36cc7-b86f-45f7-a422-db7241ba6513") : secret "logging-loki-gateway-http" not found Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.510377 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/d4a36cc7-b86f-45f7-a422-db7241ba6513-rbac\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: E0128 16:49:45.510489 4877 configmap.go:193] Couldn't get configMap openshift-logging/logging-loki-gateway-ca-bundle: configmap "logging-loki-gateway-ca-bundle" not found Jan 28 16:49:45 crc kubenswrapper[4877]: E0128 16:49:45.510518 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d4a36cc7-b86f-45f7-a422-db7241ba6513-logging-loki-gateway-ca-bundle podName:d4a36cc7-b86f-45f7-a422-db7241ba6513 nodeName:}" failed. No retries permitted until 2026-01-28 16:49:46.010510205 +0000 UTC m=+889.568837093 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "logging-loki-gateway-ca-bundle" (UniqueName: "kubernetes.io/configmap/d4a36cc7-b86f-45f7-a422-db7241ba6513-logging-loki-gateway-ca-bundle") pod "logging-loki-gateway-5f6787f74d-h6fl4" (UID: "d4a36cc7-b86f-45f7-a422-db7241ba6513") : configmap "logging-loki-gateway-ca-bundle" not found Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.511256 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/d4a36cc7-b86f-45f7-a422-db7241ba6513-lokistack-gateway\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.511273 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/d4a36cc7-b86f-45f7-a422-db7241ba6513-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.511687 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4a36cc7-b86f-45f7-a422-db7241ba6513-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.531197 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/d4a36cc7-b86f-45f7-a422-db7241ba6513-tenants\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.535721 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57wpk\" (UniqueName: \"kubernetes.io/projected/d4a36cc7-b86f-45f7-a422-db7241ba6513-kube-api-access-57wpk\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.778082 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c"] Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.844704 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-76788598db-8zdq7"] Jan 28 16:49:45 crc kubenswrapper[4877]: W0128 16:49:45.846551 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod094adba0_094e_453d_87f4_b2098f9fe680.slice/crio-87308ce88709b4f284ac8e7e1fd5ba55159116f14273416c3a74851c6eed7d3b WatchSource:0}: Error finding container 87308ce88709b4f284ac8e7e1fd5ba55159116f14273416c3a74851c6eed7d3b: Status 404 returned error can't find the container with id 87308ce88709b4f284ac8e7e1fd5ba55159116f14273416c3a74851c6eed7d3b Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.864454 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.865749 4877 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.871310 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-grpc" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.871588 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-http" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.879142 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.916542 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/f014bcf5-ec99-4a23-a06c-29c2e8213375-tls-secret\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.916696 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f014bcf5-ec99-4a23-a06c-29c2e8213375-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.917611 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f014bcf5-ec99-4a23-a06c-29c2e8213375-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.922163 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/f014bcf5-ec99-4a23-a06c-29c2e8213375-tls-secret\") pod \"logging-loki-gateway-5f6787f74d-4m9v2\" (UID: \"f014bcf5-ec99-4a23-a06c-29c2e8213375\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.953451 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4"] Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.965875 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.967135 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.970893 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-http" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.979231 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-grpc" Jan 28 16:49:45 crc kubenswrapper[4877]: I0128 16:49:45.985159 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.018687 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/1e5a7c04-5664-4b81-91cc-51402cb80ad0-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.018780 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1e5a7c04-5664-4b81-91cc-51402cb80ad0-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.018854 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7c776c3f-aba5-4aa5-ada3-0ba2c5d82509\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7c776c3f-aba5-4aa5-ada3-0ba2c5d82509\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.018924 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/d4a36cc7-b86f-45f7-a422-db7241ba6513-tls-secret\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.018945 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6c53e72f-ac18-4b1d-af25-be3db13e06a4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6c53e72f-ac18-4b1d-af25-be3db13e06a4\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.019025 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4a36cc7-b86f-45f7-a422-db7241ba6513-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.019111 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5sst\" (UniqueName: \"kubernetes.io/projected/1e5a7c04-5664-4b81-91cc-51402cb80ad0-kube-api-access-j5sst\") pod \"logging-loki-ingester-0\" (UID: 
\"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.019167 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e5a7c04-5664-4b81-91cc-51402cb80ad0-config\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.019228 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/1e5a7c04-5664-4b81-91cc-51402cb80ad0-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.019253 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/1e5a7c04-5664-4b81-91cc-51402cb80ad0-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.021447 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4a36cc7-b86f-45f7-a422-db7241ba6513-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.026281 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/d4a36cc7-b86f-45f7-a422-db7241ba6513-tls-secret\") pod \"logging-loki-gateway-5f6787f74d-h6fl4\" (UID: \"d4a36cc7-b86f-45f7-a422-db7241ba6513\") " pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.040733 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.042384 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.047767 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-grpc" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.048675 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-http" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.057270 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.121334 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5sst\" (UniqueName: \"kubernetes.io/projected/1e5a7c04-5664-4b81-91cc-51402cb80ad0-kube-api-access-j5sst\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.121463 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-config\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.121513 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e5a7c04-5664-4b81-91cc-51402cb80ad0-config\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.121548 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.121597 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/1e5a7c04-5664-4b81-91cc-51402cb80ad0-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.121623 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/1e5a7c04-5664-4b81-91cc-51402cb80ad0-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.121651 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gww6d\" (UniqueName: \"kubernetes.io/projected/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-kube-api-access-gww6d\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.121686 4877 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a3fc8971-9018-40fe-93e7-5f18b4239bf0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a3fc8971-9018-40fe-93e7-5f18b4239bf0\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.121724 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/1e5a7c04-5664-4b81-91cc-51402cb80ad0-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.121748 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.121776 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1e5a7c04-5664-4b81-91cc-51402cb80ad0-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.121821 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.121859 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7c776c3f-aba5-4aa5-ada3-0ba2c5d82509\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7c776c3f-aba5-4aa5-ada3-0ba2c5d82509\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.121900 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6c53e72f-ac18-4b1d-af25-be3db13e06a4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6c53e72f-ac18-4b1d-af25-be3db13e06a4\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.121922 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.122861 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e5a7c04-5664-4b81-91cc-51402cb80ad0-config\") pod \"logging-loki-ingester-0\" 
(UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.123657 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1e5a7c04-5664-4b81-91cc-51402cb80ad0-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.126981 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/1e5a7c04-5664-4b81-91cc-51402cb80ad0-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.127079 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/1e5a7c04-5664-4b81-91cc-51402cb80ad0-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.128603 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.128670 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7c776c3f-aba5-4aa5-ada3-0ba2c5d82509\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7c776c3f-aba5-4aa5-ada3-0ba2c5d82509\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/477403e498379d1d1e86d0aa58ec272bddfbc5f20f8e8538041ae002c1c709d0/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.129343 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/1e5a7c04-5664-4b81-91cc-51402cb80ad0-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.129748 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.129780 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6c53e72f-ac18-4b1d-af25-be3db13e06a4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6c53e72f-ac18-4b1d-af25-be3db13e06a4\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/2f4309749ae1453957f7c8ee9e5ed5abac2dc08d8b49ed0a9e6a0663c0e3942c/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.143884 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5sst\" (UniqueName: \"kubernetes.io/projected/1e5a7c04-5664-4b81-91cc-51402cb80ad0-kube-api-access-j5sst\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.166508 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6c53e72f-ac18-4b1d-af25-be3db13e06a4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6c53e72f-ac18-4b1d-af25-be3db13e06a4\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.169509 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7c776c3f-aba5-4aa5-ada3-0ba2c5d82509\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7c776c3f-aba5-4aa5-ada3-0ba2c5d82509\") pod \"logging-loki-ingester-0\" (UID: \"1e5a7c04-5664-4b81-91cc-51402cb80ad0\") " pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.198548 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.212344 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.223514 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/664f0a31-3c9a-4024-bd19-08326d7cdfaa-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.223588 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/664f0a31-3c9a-4024-bd19-08326d7cdfaa-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.223628 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gww6d\" (UniqueName: \"kubernetes.io/projected/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-kube-api-access-gww6d\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.223837 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/664f0a31-3c9a-4024-bd19-08326d7cdfaa-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.223910 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gtg9\" (UniqueName: \"kubernetes.io/projected/664f0a31-3c9a-4024-bd19-08326d7cdfaa-kube-api-access-7gtg9\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.223969 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/664f0a31-3c9a-4024-bd19-08326d7cdfaa-config\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.224065 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-config\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.224090 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.224116 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/664f0a31-3c9a-4024-bd19-08326d7cdfaa-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.224140 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a3fc8971-9018-40fe-93e7-5f18b4239bf0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a3fc8971-9018-40fe-93e7-5f18b4239bf0\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.224167 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.224186 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.224214 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.224237 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-32e29e53-6215-40eb-bf6e-873c26dc20df\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32e29e53-6215-40eb-bf6e-873c26dc20df\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.225289 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-config\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.230288 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.230449 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " 
pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.231508 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.232382 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.244364 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.244678 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a3fc8971-9018-40fe-93e7-5f18b4239bf0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a3fc8971-9018-40fe-93e7-5f18b4239bf0\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/34eb651823d51292fba538063359a772a9f5e04899f00ce0ea8df26e350858f2/globalmount\"" pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.247601 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gww6d\" (UniqueName: \"kubernetes.io/projected/8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a-kube-api-access-gww6d\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.272637 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a3fc8971-9018-40fe-93e7-5f18b4239bf0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a3fc8971-9018-40fe-93e7-5f18b4239bf0\") pod \"logging-loki-compactor-0\" (UID: \"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a\") " pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.321764 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.329383 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-32e29e53-6215-40eb-bf6e-873c26dc20df\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32e29e53-6215-40eb-bf6e-873c26dc20df\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.329436 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/664f0a31-3c9a-4024-bd19-08326d7cdfaa-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.329489 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/664f0a31-3c9a-4024-bd19-08326d7cdfaa-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.329536 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/664f0a31-3c9a-4024-bd19-08326d7cdfaa-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.329565 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gtg9\" (UniqueName: \"kubernetes.io/projected/664f0a31-3c9a-4024-bd19-08326d7cdfaa-kube-api-access-7gtg9\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.329593 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/664f0a31-3c9a-4024-bd19-08326d7cdfaa-config\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.329658 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/664f0a31-3c9a-4024-bd19-08326d7cdfaa-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.332279 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/664f0a31-3c9a-4024-bd19-08326d7cdfaa-config\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.335002 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: 
\"kubernetes.io/secret/664f0a31-3c9a-4024-bd19-08326d7cdfaa-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.335631 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/664f0a31-3c9a-4024-bd19-08326d7cdfaa-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.335770 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/664f0a31-3c9a-4024-bd19-08326d7cdfaa-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.336742 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.336777 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-32e29e53-6215-40eb-bf6e-873c26dc20df\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32e29e53-6215-40eb-bf6e-873c26dc20df\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/950f3aaa677915d431e8b83d58f49ced54149b12b5c4573b50a29c584d9ffff5/globalmount\"" pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.338582 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/664f0a31-3c9a-4024-bd19-08326d7cdfaa-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.347462 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.358417 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gtg9\" (UniqueName: \"kubernetes.io/projected/664f0a31-3c9a-4024-bd19-08326d7cdfaa-kube-api-access-7gtg9\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.360258 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-32e29e53-6215-40eb-bf6e-873c26dc20df\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-32e29e53-6215-40eb-bf6e-873c26dc20df\") pod \"logging-loki-index-gateway-0\" (UID: \"664f0a31-3c9a-4024-bd19-08326d7cdfaa\") " pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.374397 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.377160 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-76788598db-8zdq7" event={"ID":"094adba0-094e-453d-87f4-b2098f9fe680","Type":"ContainerStarted","Data":"87308ce88709b4f284ac8e7e1fd5ba55159116f14273416c3a74851c6eed7d3b"} Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.385845 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c" event={"ID":"8edb732d-043a-4fbf-b0b1-da98fdaa9a84","Type":"ContainerStarted","Data":"3bd7b2babb6a76dd29f6b66d9cdfc10ecb0128a296d6ffd5f43dd49687f34330"} Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.386782 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4" event={"ID":"ea81a55a-52f7-471b-bff8-9b49e05d459a","Type":"ContainerStarted","Data":"a5e3570b27b92d4db3e4c235acee24db9096cee51725aabfcdb556a3e0e06be6"} Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.467289 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.723144 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2"] Jan 28 16:49:46 crc kubenswrapper[4877]: W0128 16:49:46.734867 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf014bcf5_ec99_4a23_a06c_29c2e8213375.slice/crio-ad21f7a337dd3986b6725ce1a7f37e248b0b541d61e256a7fd467728460a8093 WatchSource:0}: Error finding container ad21f7a337dd3986b6725ce1a7f37e248b0b541d61e256a7fd467728460a8093: Status 404 returned error can't find the container with id ad21f7a337dd3986b6725ce1a7f37e248b0b541d61e256a7fd467728460a8093 Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.848928 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Jan 28 16:49:46 crc kubenswrapper[4877]: W0128 16:49:46.851190 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d6ab0ef_49ee_4d3a_9dbb_bedc9617406a.slice/crio-353970d9b0bdb61d938ae5ae5d808ed774800db01221e364894d7ed6d294c4b5 WatchSource:0}: Error finding container 353970d9b0bdb61d938ae5ae5d808ed774800db01221e364894d7ed6d294c4b5: Status 404 returned error can't find the container with id 353970d9b0bdb61d938ae5ae5d808ed774800db01221e364894d7ed6d294c4b5 Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.858265 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Jan 28 16:49:46 crc kubenswrapper[4877]: I0128 16:49:46.865469 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4"] Jan 28 16:49:46 crc kubenswrapper[4877]: W0128 16:49:46.868004 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd4a36cc7_b86f_45f7_a422_db7241ba6513.slice/crio-dbe2978b07a84944c149cf3395aa3beaf3696a7fa1fbe7e7587ca9d99d2b6638 WatchSource:0}: Error finding container dbe2978b07a84944c149cf3395aa3beaf3696a7fa1fbe7e7587ca9d99d2b6638: Status 404 returned error can't find the container with id 
dbe2978b07a84944c149cf3395aa3beaf3696a7fa1fbe7e7587ca9d99d2b6638 Jan 28 16:49:46 crc kubenswrapper[4877]: W0128 16:49:46.868704 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod664f0a31_3c9a_4024_bd19_08326d7cdfaa.slice/crio-005ee08c295fbba22ce4980fcc3030e6b0274ee30883edc0b5069c7a49e17e8f WatchSource:0}: Error finding container 005ee08c295fbba22ce4980fcc3030e6b0274ee30883edc0b5069c7a49e17e8f: Status 404 returned error can't find the container with id 005ee08c295fbba22ce4980fcc3030e6b0274ee30883edc0b5069c7a49e17e8f Jan 28 16:49:47 crc kubenswrapper[4877]: I0128 16:49:47.414915 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" event={"ID":"f014bcf5-ec99-4a23-a06c-29c2e8213375","Type":"ContainerStarted","Data":"ad21f7a337dd3986b6725ce1a7f37e248b0b541d61e256a7fd467728460a8093"} Jan 28 16:49:47 crc kubenswrapper[4877]: I0128 16:49:47.418233 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"1e5a7c04-5664-4b81-91cc-51402cb80ad0","Type":"ContainerStarted","Data":"393aa38aba9c096f3399deffcd29e328c551289362f1ea5a39d57f99504f0459"} Jan 28 16:49:47 crc kubenswrapper[4877]: I0128 16:49:47.425913 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a","Type":"ContainerStarted","Data":"353970d9b0bdb61d938ae5ae5d808ed774800db01221e364894d7ed6d294c4b5"} Jan 28 16:49:47 crc kubenswrapper[4877]: I0128 16:49:47.428275 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"664f0a31-3c9a-4024-bd19-08326d7cdfaa","Type":"ContainerStarted","Data":"005ee08c295fbba22ce4980fcc3030e6b0274ee30883edc0b5069c7a49e17e8f"} Jan 28 16:49:47 crc kubenswrapper[4877]: I0128 16:49:47.431149 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" event={"ID":"d4a36cc7-b86f-45f7-a422-db7241ba6513","Type":"ContainerStarted","Data":"dbe2978b07a84944c149cf3395aa3beaf3696a7fa1fbe7e7587ca9d99d2b6638"} Jan 28 16:49:51 crc kubenswrapper[4877]: I0128 16:49:51.467142 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c" event={"ID":"8edb732d-043a-4fbf-b0b1-da98fdaa9a84","Type":"ContainerStarted","Data":"8f4da036e84213d2efa8d125a73b3b27b9c5150eda451aa12de4dff2b1d03e9b"} Jan 28 16:49:51 crc kubenswrapper[4877]: I0128 16:49:51.467842 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c" Jan 28 16:49:51 crc kubenswrapper[4877]: I0128 16:49:51.494623 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c" podStartSLOduration=2.633221316 podStartE2EDuration="7.4946014s" podCreationTimestamp="2026-01-28 16:49:44 +0000 UTC" firstStartedPulling="2026-01-28 16:49:45.789684915 +0000 UTC m=+889.348011803" lastFinishedPulling="2026-01-28 16:49:50.651064979 +0000 UTC m=+894.209391887" observedRunningTime="2026-01-28 16:49:51.493600612 +0000 UTC m=+895.051927500" watchObservedRunningTime="2026-01-28 16:49:51.4946014 +0000 UTC m=+895.052928288" Jan 28 16:49:52 crc kubenswrapper[4877]: I0128 16:49:52.480691 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"664f0a31-3c9a-4024-bd19-08326d7cdfaa","Type":"ContainerStarted","Data":"82967e7d2777c98bc5aa612af6e1a12be993d881c2b9030581ef83a8a03ee4b3"} Jan 28 16:49:52 crc kubenswrapper[4877]: I0128 16:49:52.481994 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:49:52 crc kubenswrapper[4877]: I0128 16:49:52.486512 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4" event={"ID":"ea81a55a-52f7-471b-bff8-9b49e05d459a","Type":"ContainerStarted","Data":"42ad27e053e33048b1b4c85073eae63f65e99c2ee7e86b2afc512709b1caf681"} Jan 28 16:49:52 crc kubenswrapper[4877]: I0128 16:49:52.487112 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4" Jan 28 16:49:52 crc kubenswrapper[4877]: I0128 16:49:52.489927 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" event={"ID":"d4a36cc7-b86f-45f7-a422-db7241ba6513","Type":"ContainerStarted","Data":"71916f9a344ef00c018190f4bd2f17ab1995e18dedabece75ae02e563d104b6c"} Jan 28 16:49:52 crc kubenswrapper[4877]: I0128 16:49:52.492705 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-76788598db-8zdq7" event={"ID":"094adba0-094e-453d-87f4-b2098f9fe680","Type":"ContainerStarted","Data":"f5c633ec51c9c3c6aae8605647009f6d035007d07fa04e6d46f6c13c5749c664"} Jan 28 16:49:52 crc kubenswrapper[4877]: I0128 16:49:52.493313 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-querier-76788598db-8zdq7" Jan 28 16:49:52 crc kubenswrapper[4877]: I0128 16:49:52.499488 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"1e5a7c04-5664-4b81-91cc-51402cb80ad0","Type":"ContainerStarted","Data":"9045c31c8f59d0dffe5f7ff4b56eec8568776eeb9cdcf9620785f5f90151427e"} Jan 28 16:49:52 crc kubenswrapper[4877]: I0128 16:49:52.499970 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:49:52 crc kubenswrapper[4877]: I0128 16:49:52.503465 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"8d6ab0ef-49ee-4d3a-9dbb-bedc9617406a","Type":"ContainerStarted","Data":"4d073fcadc0f61ed317c8a38b489726c206bee3c1a42e1823b89c0cf52aab3dc"} Jan 28 16:49:52 crc kubenswrapper[4877]: I0128 16:49:52.513906 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-index-gateway-0" podStartSLOduration=3.7289250579999997 podStartE2EDuration="7.513863463s" podCreationTimestamp="2026-01-28 16:49:45 +0000 UTC" firstStartedPulling="2026-01-28 16:49:46.875876107 +0000 UTC m=+890.434202995" lastFinishedPulling="2026-01-28 16:49:50.660814472 +0000 UTC m=+894.219141400" observedRunningTime="2026-01-28 16:49:52.509844564 +0000 UTC m=+896.068171472" watchObservedRunningTime="2026-01-28 16:49:52.513863463 +0000 UTC m=+896.072190391" Jan 28 16:49:52 crc kubenswrapper[4877]: I0128 16:49:52.546468 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4" podStartSLOduration=2.858323574 podStartE2EDuration="8.546436244s" podCreationTimestamp="2026-01-28 
16:49:44 +0000 UTC" firstStartedPulling="2026-01-28 16:49:45.964692728 +0000 UTC m=+889.523019626" lastFinishedPulling="2026-01-28 16:49:51.652805398 +0000 UTC m=+895.211132296" observedRunningTime="2026-01-28 16:49:52.545411707 +0000 UTC m=+896.103738615" watchObservedRunningTime="2026-01-28 16:49:52.546436244 +0000 UTC m=+896.104763172" Jan 28 16:49:52 crc kubenswrapper[4877]: I0128 16:49:52.574197 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-compactor-0" podStartSLOduration=3.77309493 podStartE2EDuration="8.574171304s" podCreationTimestamp="2026-01-28 16:49:44 +0000 UTC" firstStartedPulling="2026-01-28 16:49:46.854075408 +0000 UTC m=+890.412402296" lastFinishedPulling="2026-01-28 16:49:51.655151782 +0000 UTC m=+895.213478670" observedRunningTime="2026-01-28 16:49:52.56404147 +0000 UTC m=+896.122368358" watchObservedRunningTime="2026-01-28 16:49:52.574171304 +0000 UTC m=+896.132498202" Jan 28 16:49:52 crc kubenswrapper[4877]: I0128 16:49:52.589095 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-querier-76788598db-8zdq7" podStartSLOduration=2.80684114 podStartE2EDuration="8.589075297s" podCreationTimestamp="2026-01-28 16:49:44 +0000 UTC" firstStartedPulling="2026-01-28 16:49:45.853319925 +0000 UTC m=+889.411646813" lastFinishedPulling="2026-01-28 16:49:51.635554082 +0000 UTC m=+895.193880970" observedRunningTime="2026-01-28 16:49:52.587383152 +0000 UTC m=+896.145710080" watchObservedRunningTime="2026-01-28 16:49:52.589075297 +0000 UTC m=+896.147402175" Jan 28 16:49:52 crc kubenswrapper[4877]: I0128 16:49:52.610247 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-ingester-0" podStartSLOduration=3.486330766 podStartE2EDuration="8.610222669s" podCreationTimestamp="2026-01-28 16:49:44 +0000 UTC" firstStartedPulling="2026-01-28 16:49:46.503296432 +0000 UTC m=+890.061623320" lastFinishedPulling="2026-01-28 16:49:51.627188335 +0000 UTC m=+895.185515223" observedRunningTime="2026-01-28 16:49:52.608262256 +0000 UTC m=+896.166589144" watchObservedRunningTime="2026-01-28 16:49:52.610222669 +0000 UTC m=+896.168549557" Jan 28 16:49:53 crc kubenswrapper[4877]: I0128 16:49:53.511540 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:49:59 crc kubenswrapper[4877]: I0128 16:49:59.564699 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" event={"ID":"d4a36cc7-b86f-45f7-a422-db7241ba6513","Type":"ContainerStarted","Data":"38cd7e419a5da26fdf98e5c38510dcfe8c17daf8b4b2ed40ab6f90eebac57f8e"} Jan 28 16:49:59 crc kubenswrapper[4877]: I0128 16:49:59.568685 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" event={"ID":"f014bcf5-ec99-4a23-a06c-29c2e8213375","Type":"ContainerStarted","Data":"86c1ae0bbfcaeef8a4557a1735d19c4ca60822b46174be6f235ed5f1e3a82409"} Jan 28 16:49:59 crc kubenswrapper[4877]: I0128 16:49:59.568755 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:59 crc kubenswrapper[4877]: I0128 16:49:59.568779 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" 
event={"ID":"f014bcf5-ec99-4a23-a06c-29c2e8213375","Type":"ContainerStarted","Data":"c096e11d5c5e1442be74263397f95877d6f45d700213a8d8fa0757a83025c6bf"} Jan 28 16:49:59 crc kubenswrapper[4877]: I0128 16:49:59.568806 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:59 crc kubenswrapper[4877]: I0128 16:49:59.568829 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:59 crc kubenswrapper[4877]: I0128 16:49:59.568851 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:59 crc kubenswrapper[4877]: I0128 16:49:59.582466 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:59 crc kubenswrapper[4877]: I0128 16:49:59.582891 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:59 crc kubenswrapper[4877]: I0128 16:49:59.587112 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" Jan 28 16:49:59 crc kubenswrapper[4877]: I0128 16:49:59.588895 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" Jan 28 16:49:59 crc kubenswrapper[4877]: I0128 16:49:59.621827 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" podStartSLOduration=2.706649755 podStartE2EDuration="14.621800061s" podCreationTimestamp="2026-01-28 16:49:45 +0000 UTC" firstStartedPulling="2026-01-28 16:49:46.874227443 +0000 UTC m=+890.432554331" lastFinishedPulling="2026-01-28 16:49:58.789377749 +0000 UTC m=+902.347704637" observedRunningTime="2026-01-28 16:49:59.595724765 +0000 UTC m=+903.154051653" watchObservedRunningTime="2026-01-28 16:49:59.621800061 +0000 UTC m=+903.180126949" Jan 28 16:49:59 crc kubenswrapper[4877]: I0128 16:49:59.626674 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" podStartSLOduration=2.431161454 podStartE2EDuration="14.626658282s" podCreationTimestamp="2026-01-28 16:49:45 +0000 UTC" firstStartedPulling="2026-01-28 16:49:46.737575508 +0000 UTC m=+890.295902396" lastFinishedPulling="2026-01-28 16:49:58.933072336 +0000 UTC m=+902.491399224" observedRunningTime="2026-01-28 16:49:59.619525889 +0000 UTC m=+903.177852797" watchObservedRunningTime="2026-01-28 16:49:59.626658282 +0000 UTC m=+903.184985170" Jan 28 16:50:06 crc kubenswrapper[4877]: I0128 16:50:06.207358 4877 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Jan 28 16:50:06 crc kubenswrapper[4877]: I0128 16:50:06.208073 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="1e5a7c04-5664-4b81-91cc-51402cb80ad0" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 28 16:50:06 crc kubenswrapper[4877]: I0128 16:50:06.362713 4877 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-logging/logging-loki-compactor-0" Jan 28 16:50:06 crc kubenswrapper[4877]: I0128 16:50:06.395762 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-index-gateway-0" Jan 28 16:50:07 crc kubenswrapper[4877]: I0128 16:50:07.077061 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:50:07 crc kubenswrapper[4877]: I0128 16:50:07.077154 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:50:13 crc kubenswrapper[4877]: I0128 16:50:13.318356 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-f5czb"] Jan 28 16:50:13 crc kubenswrapper[4877]: I0128 16:50:13.320771 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f5czb" Jan 28 16:50:13 crc kubenswrapper[4877]: I0128 16:50:13.340616 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f5czb"] Jan 28 16:50:13 crc kubenswrapper[4877]: I0128 16:50:13.407582 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce950113-65ba-405a-806e-72b77961e39b-catalog-content\") pod \"community-operators-f5czb\" (UID: \"ce950113-65ba-405a-806e-72b77961e39b\") " pod="openshift-marketplace/community-operators-f5czb" Jan 28 16:50:13 crc kubenswrapper[4877]: I0128 16:50:13.408128 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce950113-65ba-405a-806e-72b77961e39b-utilities\") pod \"community-operators-f5czb\" (UID: \"ce950113-65ba-405a-806e-72b77961e39b\") " pod="openshift-marketplace/community-operators-f5czb" Jan 28 16:50:13 crc kubenswrapper[4877]: I0128 16:50:13.408220 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dznp5\" (UniqueName: \"kubernetes.io/projected/ce950113-65ba-405a-806e-72b77961e39b-kube-api-access-dznp5\") pod \"community-operators-f5czb\" (UID: \"ce950113-65ba-405a-806e-72b77961e39b\") " pod="openshift-marketplace/community-operators-f5czb" Jan 28 16:50:13 crc kubenswrapper[4877]: I0128 16:50:13.510454 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce950113-65ba-405a-806e-72b77961e39b-utilities\") pod \"community-operators-f5czb\" (UID: \"ce950113-65ba-405a-806e-72b77961e39b\") " pod="openshift-marketplace/community-operators-f5czb" Jan 28 16:50:13 crc kubenswrapper[4877]: I0128 16:50:13.510545 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dznp5\" (UniqueName: \"kubernetes.io/projected/ce950113-65ba-405a-806e-72b77961e39b-kube-api-access-dznp5\") pod \"community-operators-f5czb\" (UID: \"ce950113-65ba-405a-806e-72b77961e39b\") " 
pod="openshift-marketplace/community-operators-f5czb" Jan 28 16:50:13 crc kubenswrapper[4877]: I0128 16:50:13.510625 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce950113-65ba-405a-806e-72b77961e39b-catalog-content\") pod \"community-operators-f5czb\" (UID: \"ce950113-65ba-405a-806e-72b77961e39b\") " pod="openshift-marketplace/community-operators-f5czb" Jan 28 16:50:13 crc kubenswrapper[4877]: I0128 16:50:13.511710 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce950113-65ba-405a-806e-72b77961e39b-utilities\") pod \"community-operators-f5czb\" (UID: \"ce950113-65ba-405a-806e-72b77961e39b\") " pod="openshift-marketplace/community-operators-f5czb" Jan 28 16:50:13 crc kubenswrapper[4877]: I0128 16:50:13.511736 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce950113-65ba-405a-806e-72b77961e39b-catalog-content\") pod \"community-operators-f5czb\" (UID: \"ce950113-65ba-405a-806e-72b77961e39b\") " pod="openshift-marketplace/community-operators-f5czb" Jan 28 16:50:13 crc kubenswrapper[4877]: I0128 16:50:13.547882 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dznp5\" (UniqueName: \"kubernetes.io/projected/ce950113-65ba-405a-806e-72b77961e39b-kube-api-access-dznp5\") pod \"community-operators-f5czb\" (UID: \"ce950113-65ba-405a-806e-72b77961e39b\") " pod="openshift-marketplace/community-operators-f5czb" Jan 28 16:50:13 crc kubenswrapper[4877]: I0128 16:50:13.640253 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f5czb" Jan 28 16:50:14 crc kubenswrapper[4877]: I0128 16:50:14.174233 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f5czb"] Jan 28 16:50:14 crc kubenswrapper[4877]: W0128 16:50:14.181547 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce950113_65ba_405a_806e_72b77961e39b.slice/crio-455cef043fda95500046c60ce638bfd5359f950a1bcdaff33195c669eb320a05 WatchSource:0}: Error finding container 455cef043fda95500046c60ce638bfd5359f950a1bcdaff33195c669eb320a05: Status 404 returned error can't find the container with id 455cef043fda95500046c60ce638bfd5359f950a1bcdaff33195c669eb320a05 Jan 28 16:50:14 crc kubenswrapper[4877]: I0128 16:50:14.703190 4877 generic.go:334] "Generic (PLEG): container finished" podID="ce950113-65ba-405a-806e-72b77961e39b" containerID="bf1be9479ae9d8148ec4b81dff63a785ce7d235b3f4242763bf99bd2d48e6f88" exitCode=0 Jan 28 16:50:14 crc kubenswrapper[4877]: I0128 16:50:14.703698 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f5czb" event={"ID":"ce950113-65ba-405a-806e-72b77961e39b","Type":"ContainerDied","Data":"bf1be9479ae9d8148ec4b81dff63a785ce7d235b3f4242763bf99bd2d48e6f88"} Jan 28 16:50:14 crc kubenswrapper[4877]: I0128 16:50:14.703739 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f5czb" event={"ID":"ce950113-65ba-405a-806e-72b77961e39b","Type":"ContainerStarted","Data":"455cef043fda95500046c60ce638bfd5359f950a1bcdaff33195c669eb320a05"} Jan 28 16:50:14 crc kubenswrapper[4877]: I0128 16:50:14.999813 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c" Jan 28 16:50:15 crc kubenswrapper[4877]: I0128 16:50:15.407293 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4" Jan 28 16:50:15 crc kubenswrapper[4877]: I0128 16:50:15.509353 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-querier-76788598db-8zdq7" Jan 28 16:50:16 crc kubenswrapper[4877]: I0128 16:50:16.206746 4877 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Jan 28 16:50:16 crc kubenswrapper[4877]: I0128 16:50:16.206825 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="1e5a7c04-5664-4b81-91cc-51402cb80ad0" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 28 16:50:19 crc kubenswrapper[4877]: I0128 16:50:19.762958 4877 generic.go:334] "Generic (PLEG): container finished" podID="ce950113-65ba-405a-806e-72b77961e39b" containerID="5f8f87e41e94af1b0654fea7f75ed0399abe283f06efc1a83310324985776b85" exitCode=0 Jan 28 16:50:19 crc kubenswrapper[4877]: I0128 16:50:19.763138 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f5czb" event={"ID":"ce950113-65ba-405a-806e-72b77961e39b","Type":"ContainerDied","Data":"5f8f87e41e94af1b0654fea7f75ed0399abe283f06efc1a83310324985776b85"} Jan 28 16:50:20 crc kubenswrapper[4877]: I0128 16:50:20.775638 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f5czb" event={"ID":"ce950113-65ba-405a-806e-72b77961e39b","Type":"ContainerStarted","Data":"133e7341fa50aa8eeabcd51985051d28974ce1e991f62c87d77b8c71ea41ec2f"} Jan 28 16:50:20 crc kubenswrapper[4877]: I0128 16:50:20.797710 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-f5czb" podStartSLOduration=2.034944989 podStartE2EDuration="7.797687182s" podCreationTimestamp="2026-01-28 16:50:13 +0000 UTC" firstStartedPulling="2026-01-28 16:50:14.705965155 +0000 UTC m=+918.264292043" lastFinishedPulling="2026-01-28 16:50:20.468707348 +0000 UTC m=+924.027034236" observedRunningTime="2026-01-28 16:50:20.7924588 +0000 UTC m=+924.350785728" watchObservedRunningTime="2026-01-28 16:50:20.797687182 +0000 UTC m=+924.356014070" Jan 28 16:50:23 crc kubenswrapper[4877]: I0128 16:50:23.641257 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-f5czb" Jan 28 16:50:23 crc kubenswrapper[4877]: I0128 16:50:23.641780 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-f5czb" Jan 28 16:50:23 crc kubenswrapper[4877]: I0128 16:50:23.705500 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-f5czb" Jan 28 16:50:26 crc kubenswrapper[4877]: I0128 16:50:26.204744 4877 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Jan 
28 16:50:26 crc kubenswrapper[4877]: I0128 16:50:26.205713 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="1e5a7c04-5664-4b81-91cc-51402cb80ad0" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 28 16:50:33 crc kubenswrapper[4877]: I0128 16:50:33.691312 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-f5czb" Jan 28 16:50:33 crc kubenswrapper[4877]: I0128 16:50:33.789367 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f5czb"] Jan 28 16:50:33 crc kubenswrapper[4877]: I0128 16:50:33.833642 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-98q9v"] Jan 28 16:50:33 crc kubenswrapper[4877]: I0128 16:50:33.833912 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-98q9v" podUID="5aab0675-0c41-459e-aa67-b47ad5190813" containerName="registry-server" containerID="cri-o://b3b04a090d6f6938eef642f35eccddb8dcfabfdd2b1726b7b14b2fff6e4d4046" gracePeriod=2 Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.360549 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-98q9v" Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.518843 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mbb4z\" (UniqueName: \"kubernetes.io/projected/5aab0675-0c41-459e-aa67-b47ad5190813-kube-api-access-mbb4z\") pod \"5aab0675-0c41-459e-aa67-b47ad5190813\" (UID: \"5aab0675-0c41-459e-aa67-b47ad5190813\") " Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.518938 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5aab0675-0c41-459e-aa67-b47ad5190813-catalog-content\") pod \"5aab0675-0c41-459e-aa67-b47ad5190813\" (UID: \"5aab0675-0c41-459e-aa67-b47ad5190813\") " Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.519074 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5aab0675-0c41-459e-aa67-b47ad5190813-utilities\") pod \"5aab0675-0c41-459e-aa67-b47ad5190813\" (UID: \"5aab0675-0c41-459e-aa67-b47ad5190813\") " Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.520338 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5aab0675-0c41-459e-aa67-b47ad5190813-utilities" (OuterVolumeSpecName: "utilities") pod "5aab0675-0c41-459e-aa67-b47ad5190813" (UID: "5aab0675-0c41-459e-aa67-b47ad5190813"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.527226 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5aab0675-0c41-459e-aa67-b47ad5190813-kube-api-access-mbb4z" (OuterVolumeSpecName: "kube-api-access-mbb4z") pod "5aab0675-0c41-459e-aa67-b47ad5190813" (UID: "5aab0675-0c41-459e-aa67-b47ad5190813"). InnerVolumeSpecName "kube-api-access-mbb4z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.589932 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5aab0675-0c41-459e-aa67-b47ad5190813-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5aab0675-0c41-459e-aa67-b47ad5190813" (UID: "5aab0675-0c41-459e-aa67-b47ad5190813"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.621624 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mbb4z\" (UniqueName: \"kubernetes.io/projected/5aab0675-0c41-459e-aa67-b47ad5190813-kube-api-access-mbb4z\") on node \"crc\" DevicePath \"\"" Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.621670 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5aab0675-0c41-459e-aa67-b47ad5190813-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.621684 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5aab0675-0c41-459e-aa67-b47ad5190813-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.910998 4877 generic.go:334] "Generic (PLEG): container finished" podID="5aab0675-0c41-459e-aa67-b47ad5190813" containerID="b3b04a090d6f6938eef642f35eccddb8dcfabfdd2b1726b7b14b2fff6e4d4046" exitCode=0 Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.911054 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-98q9v" event={"ID":"5aab0675-0c41-459e-aa67-b47ad5190813","Type":"ContainerDied","Data":"b3b04a090d6f6938eef642f35eccddb8dcfabfdd2b1726b7b14b2fff6e4d4046"} Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.911070 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-98q9v" Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.911088 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-98q9v" event={"ID":"5aab0675-0c41-459e-aa67-b47ad5190813","Type":"ContainerDied","Data":"51870b09de7db9984506ea9bc4938e5e753538cc5a55050bf107ad88cefb815c"} Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.911119 4877 scope.go:117] "RemoveContainer" containerID="b3b04a090d6f6938eef642f35eccddb8dcfabfdd2b1726b7b14b2fff6e4d4046" Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.955958 4877 scope.go:117] "RemoveContainer" containerID="2c5a4488a7e8d926cedcb1c70d3975504f997cb0d93ea822431848debbf9e263" Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.958338 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-98q9v"] Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.965916 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-98q9v"] Jan 28 16:50:34 crc kubenswrapper[4877]: I0128 16:50:34.993173 4877 scope.go:117] "RemoveContainer" containerID="12724822b5129bfb5f1431a25411709307e9a7bc30d2b327a351342cb8474c50" Jan 28 16:50:35 crc kubenswrapper[4877]: I0128 16:50:35.017176 4877 scope.go:117] "RemoveContainer" containerID="b3b04a090d6f6938eef642f35eccddb8dcfabfdd2b1726b7b14b2fff6e4d4046" Jan 28 16:50:35 crc kubenswrapper[4877]: E0128 16:50:35.018188 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3b04a090d6f6938eef642f35eccddb8dcfabfdd2b1726b7b14b2fff6e4d4046\": container with ID starting with b3b04a090d6f6938eef642f35eccddb8dcfabfdd2b1726b7b14b2fff6e4d4046 not found: ID does not exist" containerID="b3b04a090d6f6938eef642f35eccddb8dcfabfdd2b1726b7b14b2fff6e4d4046" Jan 28 16:50:35 crc kubenswrapper[4877]: I0128 16:50:35.018265 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3b04a090d6f6938eef642f35eccddb8dcfabfdd2b1726b7b14b2fff6e4d4046"} err="failed to get container status \"b3b04a090d6f6938eef642f35eccddb8dcfabfdd2b1726b7b14b2fff6e4d4046\": rpc error: code = NotFound desc = could not find container \"b3b04a090d6f6938eef642f35eccddb8dcfabfdd2b1726b7b14b2fff6e4d4046\": container with ID starting with b3b04a090d6f6938eef642f35eccddb8dcfabfdd2b1726b7b14b2fff6e4d4046 not found: ID does not exist" Jan 28 16:50:35 crc kubenswrapper[4877]: I0128 16:50:35.018300 4877 scope.go:117] "RemoveContainer" containerID="2c5a4488a7e8d926cedcb1c70d3975504f997cb0d93ea822431848debbf9e263" Jan 28 16:50:35 crc kubenswrapper[4877]: E0128 16:50:35.019038 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c5a4488a7e8d926cedcb1c70d3975504f997cb0d93ea822431848debbf9e263\": container with ID starting with 2c5a4488a7e8d926cedcb1c70d3975504f997cb0d93ea822431848debbf9e263 not found: ID does not exist" containerID="2c5a4488a7e8d926cedcb1c70d3975504f997cb0d93ea822431848debbf9e263" Jan 28 16:50:35 crc kubenswrapper[4877]: I0128 16:50:35.019104 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c5a4488a7e8d926cedcb1c70d3975504f997cb0d93ea822431848debbf9e263"} err="failed to get container status \"2c5a4488a7e8d926cedcb1c70d3975504f997cb0d93ea822431848debbf9e263\": rpc error: code = NotFound desc = could not find 
container \"2c5a4488a7e8d926cedcb1c70d3975504f997cb0d93ea822431848debbf9e263\": container with ID starting with 2c5a4488a7e8d926cedcb1c70d3975504f997cb0d93ea822431848debbf9e263 not found: ID does not exist" Jan 28 16:50:35 crc kubenswrapper[4877]: I0128 16:50:35.019139 4877 scope.go:117] "RemoveContainer" containerID="12724822b5129bfb5f1431a25411709307e9a7bc30d2b327a351342cb8474c50" Jan 28 16:50:35 crc kubenswrapper[4877]: E0128 16:50:35.022878 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12724822b5129bfb5f1431a25411709307e9a7bc30d2b327a351342cb8474c50\": container with ID starting with 12724822b5129bfb5f1431a25411709307e9a7bc30d2b327a351342cb8474c50 not found: ID does not exist" containerID="12724822b5129bfb5f1431a25411709307e9a7bc30d2b327a351342cb8474c50" Jan 28 16:50:35 crc kubenswrapper[4877]: I0128 16:50:35.022964 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12724822b5129bfb5f1431a25411709307e9a7bc30d2b327a351342cb8474c50"} err="failed to get container status \"12724822b5129bfb5f1431a25411709307e9a7bc30d2b327a351342cb8474c50\": rpc error: code = NotFound desc = could not find container \"12724822b5129bfb5f1431a25411709307e9a7bc30d2b327a351342cb8474c50\": container with ID starting with 12724822b5129bfb5f1431a25411709307e9a7bc30d2b327a351342cb8474c50 not found: ID does not exist" Jan 28 16:50:35 crc kubenswrapper[4877]: I0128 16:50:35.341430 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5aab0675-0c41-459e-aa67-b47ad5190813" path="/var/lib/kubelet/pods/5aab0675-0c41-459e-aa67-b47ad5190813/volumes" Jan 28 16:50:36 crc kubenswrapper[4877]: I0128 16:50:36.205575 4877 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Jan 28 16:50:36 crc kubenswrapper[4877]: I0128 16:50:36.205649 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="1e5a7c04-5664-4b81-91cc-51402cb80ad0" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 28 16:50:36 crc kubenswrapper[4877]: I0128 16:50:36.943633 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-k84lg"] Jan 28 16:50:36 crc kubenswrapper[4877]: E0128 16:50:36.944390 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5aab0675-0c41-459e-aa67-b47ad5190813" containerName="extract-content" Jan 28 16:50:36 crc kubenswrapper[4877]: I0128 16:50:36.944494 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="5aab0675-0c41-459e-aa67-b47ad5190813" containerName="extract-content" Jan 28 16:50:36 crc kubenswrapper[4877]: E0128 16:50:36.944565 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5aab0675-0c41-459e-aa67-b47ad5190813" containerName="registry-server" Jan 28 16:50:36 crc kubenswrapper[4877]: I0128 16:50:36.944626 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="5aab0675-0c41-459e-aa67-b47ad5190813" containerName="registry-server" Jan 28 16:50:36 crc kubenswrapper[4877]: E0128 16:50:36.944708 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5aab0675-0c41-459e-aa67-b47ad5190813" containerName="extract-utilities" Jan 28 16:50:36 crc kubenswrapper[4877]: I0128 
16:50:36.944765 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="5aab0675-0c41-459e-aa67-b47ad5190813" containerName="extract-utilities" Jan 28 16:50:36 crc kubenswrapper[4877]: I0128 16:50:36.944949 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="5aab0675-0c41-459e-aa67-b47ad5190813" containerName="registry-server" Jan 28 16:50:36 crc kubenswrapper[4877]: I0128 16:50:36.946183 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:36 crc kubenswrapper[4877]: I0128 16:50:36.962434 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-k84lg"] Jan 28 16:50:36 crc kubenswrapper[4877]: I0128 16:50:36.993606 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe9ef16f-891b-48f5-9612-07064ab2b5f2-catalog-content\") pod \"redhat-marketplace-k84lg\" (UID: \"fe9ef16f-891b-48f5-9612-07064ab2b5f2\") " pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:36 crc kubenswrapper[4877]: I0128 16:50:36.993939 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72lp8\" (UniqueName: \"kubernetes.io/projected/fe9ef16f-891b-48f5-9612-07064ab2b5f2-kube-api-access-72lp8\") pod \"redhat-marketplace-k84lg\" (UID: \"fe9ef16f-891b-48f5-9612-07064ab2b5f2\") " pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:36 crc kubenswrapper[4877]: I0128 16:50:36.994015 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe9ef16f-891b-48f5-9612-07064ab2b5f2-utilities\") pod \"redhat-marketplace-k84lg\" (UID: \"fe9ef16f-891b-48f5-9612-07064ab2b5f2\") " pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:37 crc kubenswrapper[4877]: I0128 16:50:37.076532 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:50:37 crc kubenswrapper[4877]: I0128 16:50:37.076613 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:50:37 crc kubenswrapper[4877]: I0128 16:50:37.095252 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72lp8\" (UniqueName: \"kubernetes.io/projected/fe9ef16f-891b-48f5-9612-07064ab2b5f2-kube-api-access-72lp8\") pod \"redhat-marketplace-k84lg\" (UID: \"fe9ef16f-891b-48f5-9612-07064ab2b5f2\") " pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:37 crc kubenswrapper[4877]: I0128 16:50:37.095316 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe9ef16f-891b-48f5-9612-07064ab2b5f2-utilities\") pod \"redhat-marketplace-k84lg\" (UID: \"fe9ef16f-891b-48f5-9612-07064ab2b5f2\") " pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:37 crc kubenswrapper[4877]: I0128 
16:50:37.095376 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe9ef16f-891b-48f5-9612-07064ab2b5f2-catalog-content\") pod \"redhat-marketplace-k84lg\" (UID: \"fe9ef16f-891b-48f5-9612-07064ab2b5f2\") " pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:37 crc kubenswrapper[4877]: I0128 16:50:37.095958 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe9ef16f-891b-48f5-9612-07064ab2b5f2-catalog-content\") pod \"redhat-marketplace-k84lg\" (UID: \"fe9ef16f-891b-48f5-9612-07064ab2b5f2\") " pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:37 crc kubenswrapper[4877]: I0128 16:50:37.096118 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe9ef16f-891b-48f5-9612-07064ab2b5f2-utilities\") pod \"redhat-marketplace-k84lg\" (UID: \"fe9ef16f-891b-48f5-9612-07064ab2b5f2\") " pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:37 crc kubenswrapper[4877]: I0128 16:50:37.118799 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72lp8\" (UniqueName: \"kubernetes.io/projected/fe9ef16f-891b-48f5-9612-07064ab2b5f2-kube-api-access-72lp8\") pod \"redhat-marketplace-k84lg\" (UID: \"fe9ef16f-891b-48f5-9612-07064ab2b5f2\") " pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:37 crc kubenswrapper[4877]: I0128 16:50:37.285433 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:37 crc kubenswrapper[4877]: I0128 16:50:37.825954 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-k84lg"] Jan 28 16:50:37 crc kubenswrapper[4877]: I0128 16:50:37.945022 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k84lg" event={"ID":"fe9ef16f-891b-48f5-9612-07064ab2b5f2","Type":"ContainerStarted","Data":"448eb37083eecb9b8a78a43922764fab1afe8b0eff2558a9da7d53999a23a45f"} Jan 28 16:50:38 crc kubenswrapper[4877]: I0128 16:50:38.956401 4877 generic.go:334] "Generic (PLEG): container finished" podID="fe9ef16f-891b-48f5-9612-07064ab2b5f2" containerID="4f28466ddd76509a9429ab242983ee634687c9a4c856682aa72ccdb54e1c441c" exitCode=0 Jan 28 16:50:38 crc kubenswrapper[4877]: I0128 16:50:38.956461 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k84lg" event={"ID":"fe9ef16f-891b-48f5-9612-07064ab2b5f2","Type":"ContainerDied","Data":"4f28466ddd76509a9429ab242983ee634687c9a4c856682aa72ccdb54e1c441c"} Jan 28 16:50:39 crc kubenswrapper[4877]: I0128 16:50:39.966264 4877 generic.go:334] "Generic (PLEG): container finished" podID="fe9ef16f-891b-48f5-9612-07064ab2b5f2" containerID="6af6a24daac4976cc757e187786cf5378040eade8b738c695455ba5765c196c1" exitCode=0 Jan 28 16:50:39 crc kubenswrapper[4877]: I0128 16:50:39.966376 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k84lg" event={"ID":"fe9ef16f-891b-48f5-9612-07064ab2b5f2","Type":"ContainerDied","Data":"6af6a24daac4976cc757e187786cf5378040eade8b738c695455ba5765c196c1"} Jan 28 16:50:40 crc kubenswrapper[4877]: I0128 16:50:40.977784 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k84lg" 
event={"ID":"fe9ef16f-891b-48f5-9612-07064ab2b5f2","Type":"ContainerStarted","Data":"a6b25911e351cd368ab478b85787328125dcac71767a2c19a1ea0e810c5bb464"} Jan 28 16:50:41 crc kubenswrapper[4877]: I0128 16:50:41.000051 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-k84lg" podStartSLOduration=3.383430668 podStartE2EDuration="5.000021993s" podCreationTimestamp="2026-01-28 16:50:36 +0000 UTC" firstStartedPulling="2026-01-28 16:50:38.961420719 +0000 UTC m=+942.519747607" lastFinishedPulling="2026-01-28 16:50:40.578012044 +0000 UTC m=+944.136338932" observedRunningTime="2026-01-28 16:50:40.997625878 +0000 UTC m=+944.555952766" watchObservedRunningTime="2026-01-28 16:50:41.000021993 +0000 UTC m=+944.558348901" Jan 28 16:50:45 crc kubenswrapper[4877]: I0128 16:50:45.348399 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-x5lzx"] Jan 28 16:50:45 crc kubenswrapper[4877]: I0128 16:50:45.352438 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:45 crc kubenswrapper[4877]: I0128 16:50:45.362129 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x5lzx"] Jan 28 16:50:45 crc kubenswrapper[4877]: I0128 16:50:45.551374 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlj5x\" (UniqueName: \"kubernetes.io/projected/9e639e4e-cc8e-493e-a625-d879e6ea5717-kube-api-access-mlj5x\") pod \"certified-operators-x5lzx\" (UID: \"9e639e4e-cc8e-493e-a625-d879e6ea5717\") " pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:45 crc kubenswrapper[4877]: I0128 16:50:45.551445 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e639e4e-cc8e-493e-a625-d879e6ea5717-catalog-content\") pod \"certified-operators-x5lzx\" (UID: \"9e639e4e-cc8e-493e-a625-d879e6ea5717\") " pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:45 crc kubenswrapper[4877]: I0128 16:50:45.551955 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e639e4e-cc8e-493e-a625-d879e6ea5717-utilities\") pod \"certified-operators-x5lzx\" (UID: \"9e639e4e-cc8e-493e-a625-d879e6ea5717\") " pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:45 crc kubenswrapper[4877]: I0128 16:50:45.653495 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e639e4e-cc8e-493e-a625-d879e6ea5717-utilities\") pod \"certified-operators-x5lzx\" (UID: \"9e639e4e-cc8e-493e-a625-d879e6ea5717\") " pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:45 crc kubenswrapper[4877]: I0128 16:50:45.653611 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlj5x\" (UniqueName: \"kubernetes.io/projected/9e639e4e-cc8e-493e-a625-d879e6ea5717-kube-api-access-mlj5x\") pod \"certified-operators-x5lzx\" (UID: \"9e639e4e-cc8e-493e-a625-d879e6ea5717\") " pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:45 crc kubenswrapper[4877]: I0128 16:50:45.653650 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/9e639e4e-cc8e-493e-a625-d879e6ea5717-catalog-content\") pod \"certified-operators-x5lzx\" (UID: \"9e639e4e-cc8e-493e-a625-d879e6ea5717\") " pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:45 crc kubenswrapper[4877]: I0128 16:50:45.654035 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e639e4e-cc8e-493e-a625-d879e6ea5717-utilities\") pod \"certified-operators-x5lzx\" (UID: \"9e639e4e-cc8e-493e-a625-d879e6ea5717\") " pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:45 crc kubenswrapper[4877]: I0128 16:50:45.654205 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e639e4e-cc8e-493e-a625-d879e6ea5717-catalog-content\") pod \"certified-operators-x5lzx\" (UID: \"9e639e4e-cc8e-493e-a625-d879e6ea5717\") " pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:45 crc kubenswrapper[4877]: I0128 16:50:45.690081 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlj5x\" (UniqueName: \"kubernetes.io/projected/9e639e4e-cc8e-493e-a625-d879e6ea5717-kube-api-access-mlj5x\") pod \"certified-operators-x5lzx\" (UID: \"9e639e4e-cc8e-493e-a625-d879e6ea5717\") " pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:45 crc kubenswrapper[4877]: I0128 16:50:45.980425 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:46 crc kubenswrapper[4877]: I0128 16:50:46.206549 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-ingester-0" Jan 28 16:50:46 crc kubenswrapper[4877]: I0128 16:50:46.289052 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x5lzx"] Jan 28 16:50:47 crc kubenswrapper[4877]: I0128 16:50:47.041137 4877 generic.go:334] "Generic (PLEG): container finished" podID="9e639e4e-cc8e-493e-a625-d879e6ea5717" containerID="4b84edbc7b3470f8afcc15480e469e53c902597e43006b3b6ab378751fb449a4" exitCode=0 Jan 28 16:50:47 crc kubenswrapper[4877]: I0128 16:50:47.041197 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x5lzx" event={"ID":"9e639e4e-cc8e-493e-a625-d879e6ea5717","Type":"ContainerDied","Data":"4b84edbc7b3470f8afcc15480e469e53c902597e43006b3b6ab378751fb449a4"} Jan 28 16:50:47 crc kubenswrapper[4877]: I0128 16:50:47.041235 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x5lzx" event={"ID":"9e639e4e-cc8e-493e-a625-d879e6ea5717","Type":"ContainerStarted","Data":"571930091e7e9522f97301773f32ede5e1d93bfea070d8c4466f0e2c8f81ad7e"} Jan 28 16:50:47 crc kubenswrapper[4877]: I0128 16:50:47.286636 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:47 crc kubenswrapper[4877]: I0128 16:50:47.287119 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:47 crc kubenswrapper[4877]: I0128 16:50:47.346919 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:48 crc kubenswrapper[4877]: I0128 16:50:48.052607 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-x5lzx" event={"ID":"9e639e4e-cc8e-493e-a625-d879e6ea5717","Type":"ContainerStarted","Data":"735a06e3462163db2cd2a1d720a35dba4ea733e22797a1a33625dea8731c772d"} Jan 28 16:50:48 crc kubenswrapper[4877]: I0128 16:50:48.114104 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:48 crc kubenswrapper[4877]: E0128 16:50:48.227741 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e639e4e_cc8e_493e_a625_d879e6ea5717.slice/crio-735a06e3462163db2cd2a1d720a35dba4ea733e22797a1a33625dea8731c772d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e639e4e_cc8e_493e_a625_d879e6ea5717.slice/crio-conmon-735a06e3462163db2cd2a1d720a35dba4ea733e22797a1a33625dea8731c772d.scope\": RecentStats: unable to find data in memory cache]" Jan 28 16:50:48 crc kubenswrapper[4877]: E0128 16:50:48.230653 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e639e4e_cc8e_493e_a625_d879e6ea5717.slice/crio-conmon-735a06e3462163db2cd2a1d720a35dba4ea733e22797a1a33625dea8731c772d.scope\": RecentStats: unable to find data in memory cache]" Jan 28 16:50:49 crc kubenswrapper[4877]: I0128 16:50:49.061312 4877 generic.go:334] "Generic (PLEG): container finished" podID="9e639e4e-cc8e-493e-a625-d879e6ea5717" containerID="735a06e3462163db2cd2a1d720a35dba4ea733e22797a1a33625dea8731c772d" exitCode=0 Jan 28 16:50:49 crc kubenswrapper[4877]: I0128 16:50:49.061365 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x5lzx" event={"ID":"9e639e4e-cc8e-493e-a625-d879e6ea5717","Type":"ContainerDied","Data":"735a06e3462163db2cd2a1d720a35dba4ea733e22797a1a33625dea8731c772d"} Jan 28 16:50:49 crc kubenswrapper[4877]: I0128 16:50:49.731004 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-k84lg"] Jan 28 16:50:50 crc kubenswrapper[4877]: I0128 16:50:50.071423 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x5lzx" event={"ID":"9e639e4e-cc8e-493e-a625-d879e6ea5717","Type":"ContainerStarted","Data":"764bd045cf69cf84eaaf08c884aba0e8493e425ede0e56e0f553d1dbaee4b0df"} Jan 28 16:50:50 crc kubenswrapper[4877]: I0128 16:50:50.071639 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-k84lg" podUID="fe9ef16f-891b-48f5-9612-07064ab2b5f2" containerName="registry-server" containerID="cri-o://a6b25911e351cd368ab478b85787328125dcac71767a2c19a1ea0e810c5bb464" gracePeriod=2 Jan 28 16:50:50 crc kubenswrapper[4877]: I0128 16:50:50.101162 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-x5lzx" podStartSLOduration=2.606300091 podStartE2EDuration="5.1011424s" podCreationTimestamp="2026-01-28 16:50:45 +0000 UTC" firstStartedPulling="2026-01-28 16:50:47.043124862 +0000 UTC m=+950.601451750" lastFinishedPulling="2026-01-28 16:50:49.537967171 +0000 UTC m=+953.096294059" observedRunningTime="2026-01-28 16:50:50.099090884 +0000 UTC m=+953.657417772" watchObservedRunningTime="2026-01-28 16:50:50.1011424 +0000 UTC m=+953.659469288" Jan 28 
16:50:50 crc kubenswrapper[4877]: I0128 16:50:50.529019 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:50 crc kubenswrapper[4877]: I0128 16:50:50.657512 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe9ef16f-891b-48f5-9612-07064ab2b5f2-catalog-content\") pod \"fe9ef16f-891b-48f5-9612-07064ab2b5f2\" (UID: \"fe9ef16f-891b-48f5-9612-07064ab2b5f2\") " Jan 28 16:50:50 crc kubenswrapper[4877]: I0128 16:50:50.657682 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-72lp8\" (UniqueName: \"kubernetes.io/projected/fe9ef16f-891b-48f5-9612-07064ab2b5f2-kube-api-access-72lp8\") pod \"fe9ef16f-891b-48f5-9612-07064ab2b5f2\" (UID: \"fe9ef16f-891b-48f5-9612-07064ab2b5f2\") " Jan 28 16:50:50 crc kubenswrapper[4877]: I0128 16:50:50.658754 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe9ef16f-891b-48f5-9612-07064ab2b5f2-utilities\") pod \"fe9ef16f-891b-48f5-9612-07064ab2b5f2\" (UID: \"fe9ef16f-891b-48f5-9612-07064ab2b5f2\") " Jan 28 16:50:50 crc kubenswrapper[4877]: I0128 16:50:50.659530 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe9ef16f-891b-48f5-9612-07064ab2b5f2-utilities" (OuterVolumeSpecName: "utilities") pod "fe9ef16f-891b-48f5-9612-07064ab2b5f2" (UID: "fe9ef16f-891b-48f5-9612-07064ab2b5f2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:50:50 crc kubenswrapper[4877]: I0128 16:50:50.660026 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe9ef16f-891b-48f5-9612-07064ab2b5f2-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 16:50:50 crc kubenswrapper[4877]: I0128 16:50:50.664881 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe9ef16f-891b-48f5-9612-07064ab2b5f2-kube-api-access-72lp8" (OuterVolumeSpecName: "kube-api-access-72lp8") pod "fe9ef16f-891b-48f5-9612-07064ab2b5f2" (UID: "fe9ef16f-891b-48f5-9612-07064ab2b5f2"). InnerVolumeSpecName "kube-api-access-72lp8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:50:50 crc kubenswrapper[4877]: I0128 16:50:50.762091 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-72lp8\" (UniqueName: \"kubernetes.io/projected/fe9ef16f-891b-48f5-9612-07064ab2b5f2-kube-api-access-72lp8\") on node \"crc\" DevicePath \"\"" Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.012448 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe9ef16f-891b-48f5-9612-07064ab2b5f2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fe9ef16f-891b-48f5-9612-07064ab2b5f2" (UID: "fe9ef16f-891b-48f5-9612-07064ab2b5f2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.069182 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe9ef16f-891b-48f5-9612-07064ab2b5f2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.089251 4877 generic.go:334] "Generic (PLEG): container finished" podID="fe9ef16f-891b-48f5-9612-07064ab2b5f2" containerID="a6b25911e351cd368ab478b85787328125dcac71767a2c19a1ea0e810c5bb464" exitCode=0 Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.090522 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k84lg" event={"ID":"fe9ef16f-891b-48f5-9612-07064ab2b5f2","Type":"ContainerDied","Data":"a6b25911e351cd368ab478b85787328125dcac71767a2c19a1ea0e810c5bb464"} Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.090918 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k84lg" event={"ID":"fe9ef16f-891b-48f5-9612-07064ab2b5f2","Type":"ContainerDied","Data":"448eb37083eecb9b8a78a43922764fab1afe8b0eff2558a9da7d53999a23a45f"} Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.090967 4877 scope.go:117] "RemoveContainer" containerID="a6b25911e351cd368ab478b85787328125dcac71767a2c19a1ea0e810c5bb464" Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.091135 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k84lg" Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.123440 4877 scope.go:117] "RemoveContainer" containerID="6af6a24daac4976cc757e187786cf5378040eade8b738c695455ba5765c196c1" Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.134420 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-k84lg"] Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.142980 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-k84lg"] Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.145583 4877 scope.go:117] "RemoveContainer" containerID="4f28466ddd76509a9429ab242983ee634687c9a4c856682aa72ccdb54e1c441c" Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.171306 4877 scope.go:117] "RemoveContainer" containerID="a6b25911e351cd368ab478b85787328125dcac71767a2c19a1ea0e810c5bb464" Jan 28 16:50:51 crc kubenswrapper[4877]: E0128 16:50:51.171753 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6b25911e351cd368ab478b85787328125dcac71767a2c19a1ea0e810c5bb464\": container with ID starting with a6b25911e351cd368ab478b85787328125dcac71767a2c19a1ea0e810c5bb464 not found: ID does not exist" containerID="a6b25911e351cd368ab478b85787328125dcac71767a2c19a1ea0e810c5bb464" Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.171792 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6b25911e351cd368ab478b85787328125dcac71767a2c19a1ea0e810c5bb464"} err="failed to get container status \"a6b25911e351cd368ab478b85787328125dcac71767a2c19a1ea0e810c5bb464\": rpc error: code = NotFound desc = could not find container \"a6b25911e351cd368ab478b85787328125dcac71767a2c19a1ea0e810c5bb464\": container with ID starting with a6b25911e351cd368ab478b85787328125dcac71767a2c19a1ea0e810c5bb464 not found: ID does not exist" Jan 28 16:50:51 
crc kubenswrapper[4877]: I0128 16:50:51.171818 4877 scope.go:117] "RemoveContainer" containerID="6af6a24daac4976cc757e187786cf5378040eade8b738c695455ba5765c196c1" Jan 28 16:50:51 crc kubenswrapper[4877]: E0128 16:50:51.172204 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6af6a24daac4976cc757e187786cf5378040eade8b738c695455ba5765c196c1\": container with ID starting with 6af6a24daac4976cc757e187786cf5378040eade8b738c695455ba5765c196c1 not found: ID does not exist" containerID="6af6a24daac4976cc757e187786cf5378040eade8b738c695455ba5765c196c1" Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.172227 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6af6a24daac4976cc757e187786cf5378040eade8b738c695455ba5765c196c1"} err="failed to get container status \"6af6a24daac4976cc757e187786cf5378040eade8b738c695455ba5765c196c1\": rpc error: code = NotFound desc = could not find container \"6af6a24daac4976cc757e187786cf5378040eade8b738c695455ba5765c196c1\": container with ID starting with 6af6a24daac4976cc757e187786cf5378040eade8b738c695455ba5765c196c1 not found: ID does not exist" Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.172245 4877 scope.go:117] "RemoveContainer" containerID="4f28466ddd76509a9429ab242983ee634687c9a4c856682aa72ccdb54e1c441c" Jan 28 16:50:51 crc kubenswrapper[4877]: E0128 16:50:51.172609 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f28466ddd76509a9429ab242983ee634687c9a4c856682aa72ccdb54e1c441c\": container with ID starting with 4f28466ddd76509a9429ab242983ee634687c9a4c856682aa72ccdb54e1c441c not found: ID does not exist" containerID="4f28466ddd76509a9429ab242983ee634687c9a4c856682aa72ccdb54e1c441c" Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.172672 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f28466ddd76509a9429ab242983ee634687c9a4c856682aa72ccdb54e1c441c"} err="failed to get container status \"4f28466ddd76509a9429ab242983ee634687c9a4c856682aa72ccdb54e1c441c\": rpc error: code = NotFound desc = could not find container \"4f28466ddd76509a9429ab242983ee634687c9a4c856682aa72ccdb54e1c441c\": container with ID starting with 4f28466ddd76509a9429ab242983ee634687c9a4c856682aa72ccdb54e1c441c not found: ID does not exist" Jan 28 16:50:51 crc kubenswrapper[4877]: I0128 16:50:51.338818 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe9ef16f-891b-48f5-9612-07064ab2b5f2" path="/var/lib/kubelet/pods/fe9ef16f-891b-48f5-9612-07064ab2b5f2/volumes" Jan 28 16:50:55 crc kubenswrapper[4877]: I0128 16:50:55.981204 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:55 crc kubenswrapper[4877]: I0128 16:50:55.981677 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:56 crc kubenswrapper[4877]: I0128 16:50:56.042629 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:56 crc kubenswrapper[4877]: I0128 16:50:56.186463 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:56 crc kubenswrapper[4877]: I0128 16:50:56.290044 4877 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x5lzx"] Jan 28 16:50:58 crc kubenswrapper[4877]: I0128 16:50:58.151087 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-x5lzx" podUID="9e639e4e-cc8e-493e-a625-d879e6ea5717" containerName="registry-server" containerID="cri-o://764bd045cf69cf84eaaf08c884aba0e8493e425ede0e56e0f553d1dbaee4b0df" gracePeriod=2 Jan 28 16:50:58 crc kubenswrapper[4877]: I0128 16:50:58.537572 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:58 crc kubenswrapper[4877]: I0128 16:50:58.657045 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlj5x\" (UniqueName: \"kubernetes.io/projected/9e639e4e-cc8e-493e-a625-d879e6ea5717-kube-api-access-mlj5x\") pod \"9e639e4e-cc8e-493e-a625-d879e6ea5717\" (UID: \"9e639e4e-cc8e-493e-a625-d879e6ea5717\") " Jan 28 16:50:58 crc kubenswrapper[4877]: I0128 16:50:58.657225 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e639e4e-cc8e-493e-a625-d879e6ea5717-utilities\") pod \"9e639e4e-cc8e-493e-a625-d879e6ea5717\" (UID: \"9e639e4e-cc8e-493e-a625-d879e6ea5717\") " Jan 28 16:50:58 crc kubenswrapper[4877]: I0128 16:50:58.657353 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e639e4e-cc8e-493e-a625-d879e6ea5717-catalog-content\") pod \"9e639e4e-cc8e-493e-a625-d879e6ea5717\" (UID: \"9e639e4e-cc8e-493e-a625-d879e6ea5717\") " Jan 28 16:50:58 crc kubenswrapper[4877]: I0128 16:50:58.658144 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e639e4e-cc8e-493e-a625-d879e6ea5717-utilities" (OuterVolumeSpecName: "utilities") pod "9e639e4e-cc8e-493e-a625-d879e6ea5717" (UID: "9e639e4e-cc8e-493e-a625-d879e6ea5717"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:50:58 crc kubenswrapper[4877]: I0128 16:50:58.674598 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e639e4e-cc8e-493e-a625-d879e6ea5717-kube-api-access-mlj5x" (OuterVolumeSpecName: "kube-api-access-mlj5x") pod "9e639e4e-cc8e-493e-a625-d879e6ea5717" (UID: "9e639e4e-cc8e-493e-a625-d879e6ea5717"). InnerVolumeSpecName "kube-api-access-mlj5x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:50:58 crc kubenswrapper[4877]: I0128 16:50:58.723132 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e639e4e-cc8e-493e-a625-d879e6ea5717-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9e639e4e-cc8e-493e-a625-d879e6ea5717" (UID: "9e639e4e-cc8e-493e-a625-d879e6ea5717"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:50:58 crc kubenswrapper[4877]: I0128 16:50:58.758849 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e639e4e-cc8e-493e-a625-d879e6ea5717-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 16:50:58 crc kubenswrapper[4877]: I0128 16:50:58.758920 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e639e4e-cc8e-493e-a625-d879e6ea5717-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 16:50:58 crc kubenswrapper[4877]: I0128 16:50:58.758933 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlj5x\" (UniqueName: \"kubernetes.io/projected/9e639e4e-cc8e-493e-a625-d879e6ea5717-kube-api-access-mlj5x\") on node \"crc\" DevicePath \"\"" Jan 28 16:50:59 crc kubenswrapper[4877]: I0128 16:50:59.162691 4877 generic.go:334] "Generic (PLEG): container finished" podID="9e639e4e-cc8e-493e-a625-d879e6ea5717" containerID="764bd045cf69cf84eaaf08c884aba0e8493e425ede0e56e0f553d1dbaee4b0df" exitCode=0 Jan 28 16:50:59 crc kubenswrapper[4877]: I0128 16:50:59.162791 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x5lzx" event={"ID":"9e639e4e-cc8e-493e-a625-d879e6ea5717","Type":"ContainerDied","Data":"764bd045cf69cf84eaaf08c884aba0e8493e425ede0e56e0f553d1dbaee4b0df"} Jan 28 16:50:59 crc kubenswrapper[4877]: I0128 16:50:59.162866 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x5lzx" Jan 28 16:50:59 crc kubenswrapper[4877]: I0128 16:50:59.165521 4877 scope.go:117] "RemoveContainer" containerID="764bd045cf69cf84eaaf08c884aba0e8493e425ede0e56e0f553d1dbaee4b0df" Jan 28 16:50:59 crc kubenswrapper[4877]: I0128 16:50:59.165818 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x5lzx" event={"ID":"9e639e4e-cc8e-493e-a625-d879e6ea5717","Type":"ContainerDied","Data":"571930091e7e9522f97301773f32ede5e1d93bfea070d8c4466f0e2c8f81ad7e"} Jan 28 16:50:59 crc kubenswrapper[4877]: I0128 16:50:59.191453 4877 scope.go:117] "RemoveContainer" containerID="735a06e3462163db2cd2a1d720a35dba4ea733e22797a1a33625dea8731c772d" Jan 28 16:50:59 crc kubenswrapper[4877]: I0128 16:50:59.207951 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x5lzx"] Jan 28 16:50:59 crc kubenswrapper[4877]: I0128 16:50:59.210706 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-x5lzx"] Jan 28 16:50:59 crc kubenswrapper[4877]: I0128 16:50:59.216635 4877 scope.go:117] "RemoveContainer" containerID="4b84edbc7b3470f8afcc15480e469e53c902597e43006b3b6ab378751fb449a4" Jan 28 16:50:59 crc kubenswrapper[4877]: I0128 16:50:59.247737 4877 scope.go:117] "RemoveContainer" containerID="764bd045cf69cf84eaaf08c884aba0e8493e425ede0e56e0f553d1dbaee4b0df" Jan 28 16:50:59 crc kubenswrapper[4877]: E0128 16:50:59.248706 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"764bd045cf69cf84eaaf08c884aba0e8493e425ede0e56e0f553d1dbaee4b0df\": container with ID starting with 764bd045cf69cf84eaaf08c884aba0e8493e425ede0e56e0f553d1dbaee4b0df not found: ID does not exist" containerID="764bd045cf69cf84eaaf08c884aba0e8493e425ede0e56e0f553d1dbaee4b0df" Jan 28 16:50:59 crc kubenswrapper[4877]: I0128 16:50:59.248760 
4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"764bd045cf69cf84eaaf08c884aba0e8493e425ede0e56e0f553d1dbaee4b0df"} err="failed to get container status \"764bd045cf69cf84eaaf08c884aba0e8493e425ede0e56e0f553d1dbaee4b0df\": rpc error: code = NotFound desc = could not find container \"764bd045cf69cf84eaaf08c884aba0e8493e425ede0e56e0f553d1dbaee4b0df\": container with ID starting with 764bd045cf69cf84eaaf08c884aba0e8493e425ede0e56e0f553d1dbaee4b0df not found: ID does not exist" Jan 28 16:50:59 crc kubenswrapper[4877]: I0128 16:50:59.248824 4877 scope.go:117] "RemoveContainer" containerID="735a06e3462163db2cd2a1d720a35dba4ea733e22797a1a33625dea8731c772d" Jan 28 16:50:59 crc kubenswrapper[4877]: E0128 16:50:59.249835 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"735a06e3462163db2cd2a1d720a35dba4ea733e22797a1a33625dea8731c772d\": container with ID starting with 735a06e3462163db2cd2a1d720a35dba4ea733e22797a1a33625dea8731c772d not found: ID does not exist" containerID="735a06e3462163db2cd2a1d720a35dba4ea733e22797a1a33625dea8731c772d" Jan 28 16:50:59 crc kubenswrapper[4877]: I0128 16:50:59.249864 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"735a06e3462163db2cd2a1d720a35dba4ea733e22797a1a33625dea8731c772d"} err="failed to get container status \"735a06e3462163db2cd2a1d720a35dba4ea733e22797a1a33625dea8731c772d\": rpc error: code = NotFound desc = could not find container \"735a06e3462163db2cd2a1d720a35dba4ea733e22797a1a33625dea8731c772d\": container with ID starting with 735a06e3462163db2cd2a1d720a35dba4ea733e22797a1a33625dea8731c772d not found: ID does not exist" Jan 28 16:50:59 crc kubenswrapper[4877]: I0128 16:50:59.249880 4877 scope.go:117] "RemoveContainer" containerID="4b84edbc7b3470f8afcc15480e469e53c902597e43006b3b6ab378751fb449a4" Jan 28 16:50:59 crc kubenswrapper[4877]: E0128 16:50:59.250088 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b84edbc7b3470f8afcc15480e469e53c902597e43006b3b6ab378751fb449a4\": container with ID starting with 4b84edbc7b3470f8afcc15480e469e53c902597e43006b3b6ab378751fb449a4 not found: ID does not exist" containerID="4b84edbc7b3470f8afcc15480e469e53c902597e43006b3b6ab378751fb449a4" Jan 28 16:50:59 crc kubenswrapper[4877]: I0128 16:50:59.250110 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b84edbc7b3470f8afcc15480e469e53c902597e43006b3b6ab378751fb449a4"} err="failed to get container status \"4b84edbc7b3470f8afcc15480e469e53c902597e43006b3b6ab378751fb449a4\": rpc error: code = NotFound desc = could not find container \"4b84edbc7b3470f8afcc15480e469e53c902597e43006b3b6ab378751fb449a4\": container with ID starting with 4b84edbc7b3470f8afcc15480e469e53c902597e43006b3b6ab378751fb449a4 not found: ID does not exist" Jan 28 16:50:59 crc kubenswrapper[4877]: E0128 16:50:59.260320 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e639e4e_cc8e_493e_a625_d879e6ea5717.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e639e4e_cc8e_493e_a625_d879e6ea5717.slice/crio-571930091e7e9522f97301773f32ede5e1d93bfea070d8c4466f0e2c8f81ad7e\": RecentStats: unable to find 
data in memory cache]" Jan 28 16:50:59 crc kubenswrapper[4877]: I0128 16:50:59.344559 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e639e4e-cc8e-493e-a625-d879e6ea5717" path="/var/lib/kubelet/pods/9e639e4e-cc8e-493e-a625-d879e6ea5717/volumes" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.822219 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-7g9lg"] Jan 28 16:51:03 crc kubenswrapper[4877]: E0128 16:51:03.823689 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e639e4e-cc8e-493e-a625-d879e6ea5717" containerName="extract-content" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.823704 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e639e4e-cc8e-493e-a625-d879e6ea5717" containerName="extract-content" Jan 28 16:51:03 crc kubenswrapper[4877]: E0128 16:51:03.823718 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe9ef16f-891b-48f5-9612-07064ab2b5f2" containerName="extract-content" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.823725 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe9ef16f-891b-48f5-9612-07064ab2b5f2" containerName="extract-content" Jan 28 16:51:03 crc kubenswrapper[4877]: E0128 16:51:03.823743 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe9ef16f-891b-48f5-9612-07064ab2b5f2" containerName="registry-server" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.823749 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe9ef16f-891b-48f5-9612-07064ab2b5f2" containerName="registry-server" Jan 28 16:51:03 crc kubenswrapper[4877]: E0128 16:51:03.823759 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e639e4e-cc8e-493e-a625-d879e6ea5717" containerName="extract-utilities" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.823765 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e639e4e-cc8e-493e-a625-d879e6ea5717" containerName="extract-utilities" Jan 28 16:51:03 crc kubenswrapper[4877]: E0128 16:51:03.823772 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e639e4e-cc8e-493e-a625-d879e6ea5717" containerName="registry-server" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.823778 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e639e4e-cc8e-493e-a625-d879e6ea5717" containerName="registry-server" Jan 28 16:51:03 crc kubenswrapper[4877]: E0128 16:51:03.823793 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe9ef16f-891b-48f5-9612-07064ab2b5f2" containerName="extract-utilities" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.823801 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe9ef16f-891b-48f5-9612-07064ab2b5f2" containerName="extract-utilities" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.823926 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe9ef16f-891b-48f5-9612-07064ab2b5f2" containerName="registry-server" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.823934 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e639e4e-cc8e-493e-a625-d879e6ea5717" containerName="registry-server" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.824543 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-7g9lg" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.827701 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.827873 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.827918 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-5plwz" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.828115 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.828210 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.836255 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.842879 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-7g9lg"] Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.956016 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-metrics\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.956421 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-collector-syslog-receiver\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.956571 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-config-openshift-service-cacrt\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.956749 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-config\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.956868 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-collector-token\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.957155 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-tmp\") pod \"collector-7g9lg\" (UID: 
\"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.957233 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-sa-token\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.957266 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxgv8\" (UniqueName: \"kubernetes.io/projected/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-kube-api-access-hxgv8\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.957363 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-trusted-ca\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.957701 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-entrypoint\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.957753 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-datadir\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:03 crc kubenswrapper[4877]: I0128 16:51:03.991599 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-7g9lg"] Jan 28 16:51:03 crc kubenswrapper[4877]: E0128 16:51:03.992582 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[collector-syslog-receiver collector-token config config-openshift-service-cacrt datadir entrypoint kube-api-access-hxgv8 metrics sa-token tmp trusted-ca], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-logging/collector-7g9lg" podUID="fc34ca4f-33c9-4b77-9a16-aad4a1d59da8" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.059945 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-trusted-ca\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.060045 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-entrypoint\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.060075 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: 
\"kubernetes.io/host-path/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-datadir\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.060124 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-metrics\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.060145 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-collector-syslog-receiver\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.060169 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-config-openshift-service-cacrt\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.060237 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-config\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.060257 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-collector-token\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.060326 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-tmp\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.060351 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-sa-token\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.060371 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxgv8\" (UniqueName: \"kubernetes.io/projected/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-kube-api-access-hxgv8\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.061012 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-datadir\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: E0128 
16:51:04.061070 4877 secret.go:188] Couldn't get secret openshift-logging/collector-syslog-receiver: secret "collector-syslog-receiver" not found Jan 28 16:51:04 crc kubenswrapper[4877]: E0128 16:51:04.061129 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-collector-syslog-receiver podName:fc34ca4f-33c9-4b77-9a16-aad4a1d59da8 nodeName:}" failed. No retries permitted until 2026-01-28 16:51:04.561112938 +0000 UTC m=+968.119439826 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "collector-syslog-receiver" (UniqueName: "kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-collector-syslog-receiver") pod "collector-7g9lg" (UID: "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8") : secret "collector-syslog-receiver" not found Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.061732 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-trusted-ca\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.061951 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-entrypoint\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.062263 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-config\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.062812 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-config-openshift-service-cacrt\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.068390 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-metrics\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.076815 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-tmp\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.077396 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-collector-token\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.077643 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: 
\"kubernetes.io/projected/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-sa-token\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.089306 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxgv8\" (UniqueName: \"kubernetes.io/projected/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-kube-api-access-hxgv8\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.205940 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.220245 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.365859 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-tmp\") pod \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.365914 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-sa-token\") pod \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.365953 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-collector-token\") pod \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.366003 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxgv8\" (UniqueName: \"kubernetes.io/projected/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-kube-api-access-hxgv8\") pod \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.366073 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-entrypoint\") pod \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.366169 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-config-openshift-service-cacrt\") pod \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.366264 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-config\") pod \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.366305 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics\" 
(UniqueName: \"kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-metrics\") pod \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.366334 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-trusted-ca\") pod \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.366354 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-datadir\") pod \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.366931 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-datadir" (OuterVolumeSpecName: "datadir") pod "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8" (UID: "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8"). InnerVolumeSpecName "datadir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.367423 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-entrypoint" (OuterVolumeSpecName: "entrypoint") pod "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8" (UID: "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8"). InnerVolumeSpecName "entrypoint". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.367441 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-config-openshift-service-cacrt" (OuterVolumeSpecName: "config-openshift-service-cacrt") pod "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8" (UID: "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8"). InnerVolumeSpecName "config-openshift-service-cacrt". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.367821 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-config" (OuterVolumeSpecName: "config") pod "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8" (UID: "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.370511 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8" (UID: "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.370583 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-tmp" (OuterVolumeSpecName: "tmp") pod "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8" (UID: "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8"). InnerVolumeSpecName "tmp". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.373912 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-metrics" (OuterVolumeSpecName: "metrics") pod "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8" (UID: "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8"). InnerVolumeSpecName "metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.374581 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-kube-api-access-hxgv8" (OuterVolumeSpecName: "kube-api-access-hxgv8") pod "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8" (UID: "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8"). InnerVolumeSpecName "kube-api-access-hxgv8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.375111 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-collector-token" (OuterVolumeSpecName: "collector-token") pod "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8" (UID: "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8"). InnerVolumeSpecName "collector-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.377575 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-sa-token" (OuterVolumeSpecName: "sa-token") pod "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8" (UID: "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8"). InnerVolumeSpecName "sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.469104 4877 reconciler_common.go:293] "Volume detached for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-config-openshift-service-cacrt\") on node \"crc\" DevicePath \"\"" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.469154 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.469165 4877 reconciler_common.go:293] "Volume detached for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-metrics\") on node \"crc\" DevicePath \"\"" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.469177 4877 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.469186 4877 reconciler_common.go:293] "Volume detached for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-datadir\") on node \"crc\" DevicePath \"\"" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.469195 4877 reconciler_common.go:293] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-tmp\") on node \"crc\" DevicePath \"\"" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.469206 4877 reconciler_common.go:293] "Volume detached for volume \"sa-token\" (UniqueName: 
\"kubernetes.io/projected/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.469215 4877 reconciler_common.go:293] "Volume detached for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-collector-token\") on node \"crc\" DevicePath \"\"" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.469226 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxgv8\" (UniqueName: \"kubernetes.io/projected/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-kube-api-access-hxgv8\") on node \"crc\" DevicePath \"\"" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.469236 4877 reconciler_common.go:293] "Volume detached for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-entrypoint\") on node \"crc\" DevicePath \"\"" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.571218 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-collector-syslog-receiver\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.578578 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-collector-syslog-receiver\") pod \"collector-7g9lg\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " pod="openshift-logging/collector-7g9lg" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.673257 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-collector-syslog-receiver\") pod \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\" (UID: \"fc34ca4f-33c9-4b77-9a16-aad4a1d59da8\") " Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.677171 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-collector-syslog-receiver" (OuterVolumeSpecName: "collector-syslog-receiver") pod "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8" (UID: "fc34ca4f-33c9-4b77-9a16-aad4a1d59da8"). InnerVolumeSpecName "collector-syslog-receiver". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:51:04 crc kubenswrapper[4877]: I0128 16:51:04.775232 4877 reconciler_common.go:293] "Volume detached for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8-collector-syslog-receiver\") on node \"crc\" DevicePath \"\"" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.214285 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-7g9lg" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.283773 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-7g9lg"] Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.300490 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-logging/collector-7g9lg"] Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.306855 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-x8dp4"] Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.308366 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.311817 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-5plwz" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.312468 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.312910 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.313216 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.313369 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.314237 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-x8dp4"] Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.318306 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.348044 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc34ca4f-33c9-4b77-9a16-aad4a1d59da8" path="/var/lib/kubelet/pods/fc34ca4f-33c9-4b77-9a16-aad4a1d59da8/volumes" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.389422 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/6fed2369-2dba-4f83-ba06-ff46636c1bb0-config-openshift-service-cacrt\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.389527 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6fed2369-2dba-4f83-ba06-ff46636c1bb0-trusted-ca\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.389824 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fed2369-2dba-4f83-ba06-ff46636c1bb0-config\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.390052 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: 
\"kubernetes.io/secret/6fed2369-2dba-4f83-ba06-ff46636c1bb0-collector-token\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.390090 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/6fed2369-2dba-4f83-ba06-ff46636c1bb0-sa-token\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.390143 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/6fed2369-2dba-4f83-ba06-ff46636c1bb0-tmp\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.390198 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/6fed2369-2dba-4f83-ba06-ff46636c1bb0-metrics\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.390331 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tztt2\" (UniqueName: \"kubernetes.io/projected/6fed2369-2dba-4f83-ba06-ff46636c1bb0-kube-api-access-tztt2\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.390460 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/6fed2369-2dba-4f83-ba06-ff46636c1bb0-datadir\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.390622 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/6fed2369-2dba-4f83-ba06-ff46636c1bb0-entrypoint\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.390661 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/6fed2369-2dba-4f83-ba06-ff46636c1bb0-collector-syslog-receiver\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.492229 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/6fed2369-2dba-4f83-ba06-ff46636c1bb0-config-openshift-service-cacrt\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.492285 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6fed2369-2dba-4f83-ba06-ff46636c1bb0-trusted-ca\") pod 
\"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.492326 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fed2369-2dba-4f83-ba06-ff46636c1bb0-config\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.492373 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/6fed2369-2dba-4f83-ba06-ff46636c1bb0-collector-token\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.492395 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/6fed2369-2dba-4f83-ba06-ff46636c1bb0-sa-token\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.492424 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/6fed2369-2dba-4f83-ba06-ff46636c1bb0-tmp\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.492448 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/6fed2369-2dba-4f83-ba06-ff46636c1bb0-metrics\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.492518 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tztt2\" (UniqueName: \"kubernetes.io/projected/6fed2369-2dba-4f83-ba06-ff46636c1bb0-kube-api-access-tztt2\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.492550 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/6fed2369-2dba-4f83-ba06-ff46636c1bb0-datadir\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.492589 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/6fed2369-2dba-4f83-ba06-ff46636c1bb0-entrypoint\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.492615 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/6fed2369-2dba-4f83-ba06-ff46636c1bb0-collector-syslog-receiver\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.492983 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: 
\"kubernetes.io/host-path/6fed2369-2dba-4f83-ba06-ff46636c1bb0-datadir\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.493326 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/6fed2369-2dba-4f83-ba06-ff46636c1bb0-config-openshift-service-cacrt\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.494268 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fed2369-2dba-4f83-ba06-ff46636c1bb0-config\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.494384 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/6fed2369-2dba-4f83-ba06-ff46636c1bb0-entrypoint\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.494422 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6fed2369-2dba-4f83-ba06-ff46636c1bb0-trusted-ca\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.496789 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/6fed2369-2dba-4f83-ba06-ff46636c1bb0-tmp\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.502132 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/6fed2369-2dba-4f83-ba06-ff46636c1bb0-collector-syslog-receiver\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.502418 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/6fed2369-2dba-4f83-ba06-ff46636c1bb0-collector-token\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.503305 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/6fed2369-2dba-4f83-ba06-ff46636c1bb0-metrics\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.512424 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tztt2\" (UniqueName: \"kubernetes.io/projected/6fed2369-2dba-4f83-ba06-ff46636c1bb0-kube-api-access-tztt2\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.515107 4877 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/6fed2369-2dba-4f83-ba06-ff46636c1bb0-sa-token\") pod \"collector-x8dp4\" (UID: \"6fed2369-2dba-4f83-ba06-ff46636c1bb0\") " pod="openshift-logging/collector-x8dp4" Jan 28 16:51:05 crc kubenswrapper[4877]: I0128 16:51:05.626370 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-x8dp4" Jan 28 16:51:06 crc kubenswrapper[4877]: I0128 16:51:06.064671 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-x8dp4"] Jan 28 16:51:06 crc kubenswrapper[4877]: I0128 16:51:06.227884 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-x8dp4" event={"ID":"6fed2369-2dba-4f83-ba06-ff46636c1bb0","Type":"ContainerStarted","Data":"0344e08843f7375841cb459f2a09e7aa3b6cffd24ba27a43e1dd91383afb78ed"} Jan 28 16:51:07 crc kubenswrapper[4877]: I0128 16:51:07.076800 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:51:07 crc kubenswrapper[4877]: I0128 16:51:07.076908 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:51:07 crc kubenswrapper[4877]: I0128 16:51:07.076975 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:51:07 crc kubenswrapper[4877]: I0128 16:51:07.077974 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"50d22942424d37ecd19189f5b9ed73adaeed0500bb228f84257ef8a11bc4937c"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 16:51:07 crc kubenswrapper[4877]: I0128 16:51:07.078058 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" containerID="cri-o://50d22942424d37ecd19189f5b9ed73adaeed0500bb228f84257ef8a11bc4937c" gracePeriod=600 Jan 28 16:51:07 crc kubenswrapper[4877]: I0128 16:51:07.248292 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="50d22942424d37ecd19189f5b9ed73adaeed0500bb228f84257ef8a11bc4937c" exitCode=0 Jan 28 16:51:07 crc kubenswrapper[4877]: I0128 16:51:07.248359 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"50d22942424d37ecd19189f5b9ed73adaeed0500bb228f84257ef8a11bc4937c"} Jan 28 16:51:07 crc kubenswrapper[4877]: I0128 16:51:07.248841 4877 scope.go:117] "RemoveContainer" containerID="009b4d91077431e907042d1b0b9c62d4fcd7c95df803c80a1b641a0306375a2c" Jan 28 16:51:08 crc kubenswrapper[4877]: I0128 16:51:08.269195 4877 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"7c4a3e8fea9bc1e25b572220a93b200bdf216a51c64b746a15d1ef6b91b206c8"} Jan 28 16:51:16 crc kubenswrapper[4877]: I0128 16:51:16.346394 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-x8dp4" event={"ID":"6fed2369-2dba-4f83-ba06-ff46636c1bb0","Type":"ContainerStarted","Data":"bee70ed703680ed0318431db68ee37478320e3fcd6ee343bcc8e909120240469"} Jan 28 16:51:16 crc kubenswrapper[4877]: I0128 16:51:16.380715 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/collector-x8dp4" podStartSLOduration=2.04812359 podStartE2EDuration="11.380697086s" podCreationTimestamp="2026-01-28 16:51:05 +0000 UTC" firstStartedPulling="2026-01-28 16:51:06.072717229 +0000 UTC m=+969.631044117" lastFinishedPulling="2026-01-28 16:51:15.405290715 +0000 UTC m=+978.963617613" observedRunningTime="2026-01-28 16:51:16.374423806 +0000 UTC m=+979.932750694" watchObservedRunningTime="2026-01-28 16:51:16.380697086 +0000 UTC m=+979.939023974" Jan 28 16:51:44 crc kubenswrapper[4877]: I0128 16:51:44.750717 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr"] Jan 28 16:51:44 crc kubenswrapper[4877]: I0128 16:51:44.756388 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" Jan 28 16:51:44 crc kubenswrapper[4877]: I0128 16:51:44.760064 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr"] Jan 28 16:51:44 crc kubenswrapper[4877]: I0128 16:51:44.761862 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 28 16:51:44 crc kubenswrapper[4877]: I0128 16:51:44.903790 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/faea8d79-6f7b-46ec-8f17-01fe08396c32-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr\" (UID: \"faea8d79-6f7b-46ec-8f17-01fe08396c32\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" Jan 28 16:51:44 crc kubenswrapper[4877]: I0128 16:51:44.903901 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7747t\" (UniqueName: \"kubernetes.io/projected/faea8d79-6f7b-46ec-8f17-01fe08396c32-kube-api-access-7747t\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr\" (UID: \"faea8d79-6f7b-46ec-8f17-01fe08396c32\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" Jan 28 16:51:44 crc kubenswrapper[4877]: I0128 16:51:44.904387 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/faea8d79-6f7b-46ec-8f17-01fe08396c32-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr\" (UID: \"faea8d79-6f7b-46ec-8f17-01fe08396c32\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" Jan 28 16:51:45 crc kubenswrapper[4877]: I0128 16:51:45.006090 4877 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/faea8d79-6f7b-46ec-8f17-01fe08396c32-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr\" (UID: \"faea8d79-6f7b-46ec-8f17-01fe08396c32\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" Jan 28 16:51:45 crc kubenswrapper[4877]: I0128 16:51:45.006267 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7747t\" (UniqueName: \"kubernetes.io/projected/faea8d79-6f7b-46ec-8f17-01fe08396c32-kube-api-access-7747t\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr\" (UID: \"faea8d79-6f7b-46ec-8f17-01fe08396c32\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" Jan 28 16:51:45 crc kubenswrapper[4877]: I0128 16:51:45.006423 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/faea8d79-6f7b-46ec-8f17-01fe08396c32-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr\" (UID: \"faea8d79-6f7b-46ec-8f17-01fe08396c32\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" Jan 28 16:51:45 crc kubenswrapper[4877]: I0128 16:51:45.006858 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/faea8d79-6f7b-46ec-8f17-01fe08396c32-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr\" (UID: \"faea8d79-6f7b-46ec-8f17-01fe08396c32\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" Jan 28 16:51:45 crc kubenswrapper[4877]: I0128 16:51:45.006955 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/faea8d79-6f7b-46ec-8f17-01fe08396c32-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr\" (UID: \"faea8d79-6f7b-46ec-8f17-01fe08396c32\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" Jan 28 16:51:45 crc kubenswrapper[4877]: I0128 16:51:45.032190 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7747t\" (UniqueName: \"kubernetes.io/projected/faea8d79-6f7b-46ec-8f17-01fe08396c32-kube-api-access-7747t\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr\" (UID: \"faea8d79-6f7b-46ec-8f17-01fe08396c32\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" Jan 28 16:51:45 crc kubenswrapper[4877]: I0128 16:51:45.086775 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" Jan 28 16:51:45 crc kubenswrapper[4877]: I0128 16:51:45.549417 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr"] Jan 28 16:51:45 crc kubenswrapper[4877]: I0128 16:51:45.586691 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" event={"ID":"faea8d79-6f7b-46ec-8f17-01fe08396c32","Type":"ContainerStarted","Data":"be176386696dec54fd048c4c286ba1acb7ec66fd989130d23897464f9226208a"} Jan 28 16:51:46 crc kubenswrapper[4877]: I0128 16:51:46.597368 4877 generic.go:334] "Generic (PLEG): container finished" podID="faea8d79-6f7b-46ec-8f17-01fe08396c32" containerID="246f91e50944b74488255e7afab8a55150720cdb39fff43066deee928a393056" exitCode=0 Jan 28 16:51:46 crc kubenswrapper[4877]: I0128 16:51:46.597442 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" event={"ID":"faea8d79-6f7b-46ec-8f17-01fe08396c32","Type":"ContainerDied","Data":"246f91e50944b74488255e7afab8a55150720cdb39fff43066deee928a393056"} Jan 28 16:51:48 crc kubenswrapper[4877]: I0128 16:51:48.615604 4877 generic.go:334] "Generic (PLEG): container finished" podID="faea8d79-6f7b-46ec-8f17-01fe08396c32" containerID="ce20794ac482c607f4bf5d40e9bec9cde32a189dda0d1ecda945995aca651788" exitCode=0 Jan 28 16:51:48 crc kubenswrapper[4877]: I0128 16:51:48.615764 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" event={"ID":"faea8d79-6f7b-46ec-8f17-01fe08396c32","Type":"ContainerDied","Data":"ce20794ac482c607f4bf5d40e9bec9cde32a189dda0d1ecda945995aca651788"} Jan 28 16:51:49 crc kubenswrapper[4877]: I0128 16:51:49.627718 4877 generic.go:334] "Generic (PLEG): container finished" podID="faea8d79-6f7b-46ec-8f17-01fe08396c32" containerID="f35a1eaee2fa28b9d0f7efd4bf2f9d5a5698f903ea1c40a9093868b478c4d574" exitCode=0 Jan 28 16:51:49 crc kubenswrapper[4877]: I0128 16:51:49.627893 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" event={"ID":"faea8d79-6f7b-46ec-8f17-01fe08396c32","Type":"ContainerDied","Data":"f35a1eaee2fa28b9d0f7efd4bf2f9d5a5698f903ea1c40a9093868b478c4d574"} Jan 28 16:51:51 crc kubenswrapper[4877]: I0128 16:51:51.034192 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" Jan 28 16:51:51 crc kubenswrapper[4877]: I0128 16:51:51.130759 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/faea8d79-6f7b-46ec-8f17-01fe08396c32-bundle\") pod \"faea8d79-6f7b-46ec-8f17-01fe08396c32\" (UID: \"faea8d79-6f7b-46ec-8f17-01fe08396c32\") " Jan 28 16:51:51 crc kubenswrapper[4877]: I0128 16:51:51.130894 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/faea8d79-6f7b-46ec-8f17-01fe08396c32-util\") pod \"faea8d79-6f7b-46ec-8f17-01fe08396c32\" (UID: \"faea8d79-6f7b-46ec-8f17-01fe08396c32\") " Jan 28 16:51:51 crc kubenswrapper[4877]: I0128 16:51:51.131007 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7747t\" (UniqueName: \"kubernetes.io/projected/faea8d79-6f7b-46ec-8f17-01fe08396c32-kube-api-access-7747t\") pod \"faea8d79-6f7b-46ec-8f17-01fe08396c32\" (UID: \"faea8d79-6f7b-46ec-8f17-01fe08396c32\") " Jan 28 16:51:51 crc kubenswrapper[4877]: I0128 16:51:51.132359 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/faea8d79-6f7b-46ec-8f17-01fe08396c32-bundle" (OuterVolumeSpecName: "bundle") pod "faea8d79-6f7b-46ec-8f17-01fe08396c32" (UID: "faea8d79-6f7b-46ec-8f17-01fe08396c32"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:51:51 crc kubenswrapper[4877]: I0128 16:51:51.138418 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/faea8d79-6f7b-46ec-8f17-01fe08396c32-kube-api-access-7747t" (OuterVolumeSpecName: "kube-api-access-7747t") pod "faea8d79-6f7b-46ec-8f17-01fe08396c32" (UID: "faea8d79-6f7b-46ec-8f17-01fe08396c32"). InnerVolumeSpecName "kube-api-access-7747t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:51:51 crc kubenswrapper[4877]: I0128 16:51:51.233668 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7747t\" (UniqueName: \"kubernetes.io/projected/faea8d79-6f7b-46ec-8f17-01fe08396c32-kube-api-access-7747t\") on node \"crc\" DevicePath \"\"" Jan 28 16:51:51 crc kubenswrapper[4877]: I0128 16:51:51.233712 4877 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/faea8d79-6f7b-46ec-8f17-01fe08396c32-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:51:51 crc kubenswrapper[4877]: I0128 16:51:51.256261 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/faea8d79-6f7b-46ec-8f17-01fe08396c32-util" (OuterVolumeSpecName: "util") pod "faea8d79-6f7b-46ec-8f17-01fe08396c32" (UID: "faea8d79-6f7b-46ec-8f17-01fe08396c32"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:51:51 crc kubenswrapper[4877]: I0128 16:51:51.335621 4877 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/faea8d79-6f7b-46ec-8f17-01fe08396c32-util\") on node \"crc\" DevicePath \"\"" Jan 28 16:51:51 crc kubenswrapper[4877]: I0128 16:51:51.652516 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" event={"ID":"faea8d79-6f7b-46ec-8f17-01fe08396c32","Type":"ContainerDied","Data":"be176386696dec54fd048c4c286ba1acb7ec66fd989130d23897464f9226208a"} Jan 28 16:51:51 crc kubenswrapper[4877]: I0128 16:51:51.652573 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be176386696dec54fd048c4c286ba1acb7ec66fd989130d23897464f9226208a" Jan 28 16:51:51 crc kubenswrapper[4877]: I0128 16:51:51.652643 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138pzpr" Jan 28 16:51:57 crc kubenswrapper[4877]: I0128 16:51:57.545933 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-xh6lt"] Jan 28 16:51:57 crc kubenswrapper[4877]: E0128 16:51:57.547193 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="faea8d79-6f7b-46ec-8f17-01fe08396c32" containerName="pull" Jan 28 16:51:57 crc kubenswrapper[4877]: I0128 16:51:57.547209 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="faea8d79-6f7b-46ec-8f17-01fe08396c32" containerName="pull" Jan 28 16:51:57 crc kubenswrapper[4877]: E0128 16:51:57.547228 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="faea8d79-6f7b-46ec-8f17-01fe08396c32" containerName="extract" Jan 28 16:51:57 crc kubenswrapper[4877]: I0128 16:51:57.547235 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="faea8d79-6f7b-46ec-8f17-01fe08396c32" containerName="extract" Jan 28 16:51:57 crc kubenswrapper[4877]: E0128 16:51:57.547255 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="faea8d79-6f7b-46ec-8f17-01fe08396c32" containerName="util" Jan 28 16:51:57 crc kubenswrapper[4877]: I0128 16:51:57.547262 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="faea8d79-6f7b-46ec-8f17-01fe08396c32" containerName="util" Jan 28 16:51:57 crc kubenswrapper[4877]: I0128 16:51:57.547430 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="faea8d79-6f7b-46ec-8f17-01fe08396c32" containerName="extract" Jan 28 16:51:57 crc kubenswrapper[4877]: I0128 16:51:57.548168 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-xh6lt" Jan 28 16:51:57 crc kubenswrapper[4877]: I0128 16:51:57.550605 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 28 16:51:57 crc kubenswrapper[4877]: I0128 16:51:57.550870 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 28 16:51:57 crc kubenswrapper[4877]: I0128 16:51:57.551091 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-p57lt" Jan 28 16:51:57 crc kubenswrapper[4877]: I0128 16:51:57.565584 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-xh6lt"] Jan 28 16:51:57 crc kubenswrapper[4877]: I0128 16:51:57.656574 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkhrw\" (UniqueName: \"kubernetes.io/projected/cfca8cbd-3308-42de-89ab-0acf6863da86-kube-api-access-dkhrw\") pod \"nmstate-operator-646758c888-xh6lt\" (UID: \"cfca8cbd-3308-42de-89ab-0acf6863da86\") " pod="openshift-nmstate/nmstate-operator-646758c888-xh6lt" Jan 28 16:51:57 crc kubenswrapper[4877]: I0128 16:51:57.758684 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkhrw\" (UniqueName: \"kubernetes.io/projected/cfca8cbd-3308-42de-89ab-0acf6863da86-kube-api-access-dkhrw\") pod \"nmstate-operator-646758c888-xh6lt\" (UID: \"cfca8cbd-3308-42de-89ab-0acf6863da86\") " pod="openshift-nmstate/nmstate-operator-646758c888-xh6lt" Jan 28 16:51:57 crc kubenswrapper[4877]: I0128 16:51:57.805266 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkhrw\" (UniqueName: \"kubernetes.io/projected/cfca8cbd-3308-42de-89ab-0acf6863da86-kube-api-access-dkhrw\") pod \"nmstate-operator-646758c888-xh6lt\" (UID: \"cfca8cbd-3308-42de-89ab-0acf6863da86\") " pod="openshift-nmstate/nmstate-operator-646758c888-xh6lt" Jan 28 16:51:57 crc kubenswrapper[4877]: I0128 16:51:57.876080 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-xh6lt" Jan 28 16:51:58 crc kubenswrapper[4877]: I0128 16:51:58.459938 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-xh6lt"] Jan 28 16:51:58 crc kubenswrapper[4877]: I0128 16:51:58.462467 4877 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 16:51:58 crc kubenswrapper[4877]: I0128 16:51:58.710444 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-xh6lt" event={"ID":"cfca8cbd-3308-42de-89ab-0acf6863da86","Type":"ContainerStarted","Data":"ae22dd39dbca8cda00601196a9f28bc4f544a6340a153aeae7eac411da4d27d7"} Jan 28 16:52:02 crc kubenswrapper[4877]: I0128 16:52:02.746549 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-xh6lt" event={"ID":"cfca8cbd-3308-42de-89ab-0acf6863da86","Type":"ContainerStarted","Data":"3850c8da105327ac8677986ffb0bb0291be66f1d254ea137bb7dab14b145d452"} Jan 28 16:52:02 crc kubenswrapper[4877]: I0128 16:52:02.774069 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-xh6lt" podStartSLOduration=2.511711211 podStartE2EDuration="5.774022812s" podCreationTimestamp="2026-01-28 16:51:57 +0000 UTC" firstStartedPulling="2026-01-28 16:51:58.462215671 +0000 UTC m=+1022.020542569" lastFinishedPulling="2026-01-28 16:52:01.724527272 +0000 UTC m=+1025.282854170" observedRunningTime="2026-01-28 16:52:02.764196196 +0000 UTC m=+1026.322523104" watchObservedRunningTime="2026-01-28 16:52:02.774022812 +0000 UTC m=+1026.332349690" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.233847 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-cgxpv"] Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.235694 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-cgxpv" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.237982 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-lfnpn" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.262006 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-cgxpv"] Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.270191 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n"] Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.271284 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.274206 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.315248 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n"] Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.320417 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/74e879cb-e2db-473a-9248-112f04f25fa5-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-mh62n\" (UID: \"74e879cb-e2db-473a-9248-112f04f25fa5\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.320506 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f98tm\" (UniqueName: \"kubernetes.io/projected/74e879cb-e2db-473a-9248-112f04f25fa5-kube-api-access-f98tm\") pod \"nmstate-webhook-8474b5b9d8-mh62n\" (UID: \"74e879cb-e2db-473a-9248-112f04f25fa5\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.320634 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rm474\" (UniqueName: \"kubernetes.io/projected/49b04356-7b50-425a-850c-984244a0c38b-kube-api-access-rm474\") pod \"nmstate-metrics-54757c584b-cgxpv\" (UID: \"49b04356-7b50-425a-850c-984244a0c38b\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-cgxpv" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.346099 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-bvbmb"] Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.347362 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-bvbmb" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.422528 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f98tm\" (UniqueName: \"kubernetes.io/projected/74e879cb-e2db-473a-9248-112f04f25fa5-kube-api-access-f98tm\") pod \"nmstate-webhook-8474b5b9d8-mh62n\" (UID: \"74e879cb-e2db-473a-9248-112f04f25fa5\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.422622 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjkj9\" (UniqueName: \"kubernetes.io/projected/30e67463-395b-4cf1-b661-5e014d798278-kube-api-access-vjkj9\") pod \"nmstate-handler-bvbmb\" (UID: \"30e67463-395b-4cf1-b661-5e014d798278\") " pod="openshift-nmstate/nmstate-handler-bvbmb" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.422713 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rm474\" (UniqueName: \"kubernetes.io/projected/49b04356-7b50-425a-850c-984244a0c38b-kube-api-access-rm474\") pod \"nmstate-metrics-54757c584b-cgxpv\" (UID: \"49b04356-7b50-425a-850c-984244a0c38b\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-cgxpv" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.422772 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/30e67463-395b-4cf1-b661-5e014d798278-dbus-socket\") pod \"nmstate-handler-bvbmb\" (UID: \"30e67463-395b-4cf1-b661-5e014d798278\") " pod="openshift-nmstate/nmstate-handler-bvbmb" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.422858 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/30e67463-395b-4cf1-b661-5e014d798278-ovs-socket\") pod \"nmstate-handler-bvbmb\" (UID: \"30e67463-395b-4cf1-b661-5e014d798278\") " pod="openshift-nmstate/nmstate-handler-bvbmb" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.422901 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/74e879cb-e2db-473a-9248-112f04f25fa5-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-mh62n\" (UID: \"74e879cb-e2db-473a-9248-112f04f25fa5\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.422954 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/30e67463-395b-4cf1-b661-5e014d798278-nmstate-lock\") pod \"nmstate-handler-bvbmb\" (UID: \"30e67463-395b-4cf1-b661-5e014d798278\") " pod="openshift-nmstate/nmstate-handler-bvbmb" Jan 28 16:52:05 crc kubenswrapper[4877]: E0128 16:52:05.424700 4877 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Jan 28 16:52:05 crc kubenswrapper[4877]: E0128 16:52:05.424770 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/74e879cb-e2db-473a-9248-112f04f25fa5-tls-key-pair podName:74e879cb-e2db-473a-9248-112f04f25fa5 nodeName:}" failed. No retries permitted until 2026-01-28 16:52:05.924745002 +0000 UTC m=+1029.483071890 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/74e879cb-e2db-473a-9248-112f04f25fa5-tls-key-pair") pod "nmstate-webhook-8474b5b9d8-mh62n" (UID: "74e879cb-e2db-473a-9248-112f04f25fa5") : secret "openshift-nmstate-webhook" not found Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.489519 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f98tm\" (UniqueName: \"kubernetes.io/projected/74e879cb-e2db-473a-9248-112f04f25fa5-kube-api-access-f98tm\") pod \"nmstate-webhook-8474b5b9d8-mh62n\" (UID: \"74e879cb-e2db-473a-9248-112f04f25fa5\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.514602 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rm474\" (UniqueName: \"kubernetes.io/projected/49b04356-7b50-425a-850c-984244a0c38b-kube-api-access-rm474\") pod \"nmstate-metrics-54757c584b-cgxpv\" (UID: \"49b04356-7b50-425a-850c-984244a0c38b\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-cgxpv" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.517294 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp"] Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.518723 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.546597 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjkj9\" (UniqueName: \"kubernetes.io/projected/30e67463-395b-4cf1-b661-5e014d798278-kube-api-access-vjkj9\") pod \"nmstate-handler-bvbmb\" (UID: \"30e67463-395b-4cf1-b661-5e014d798278\") " pod="openshift-nmstate/nmstate-handler-bvbmb" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.546718 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/30e67463-395b-4cf1-b661-5e014d798278-dbus-socket\") pod \"nmstate-handler-bvbmb\" (UID: \"30e67463-395b-4cf1-b661-5e014d798278\") " pod="openshift-nmstate/nmstate-handler-bvbmb" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.546761 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/30e67463-395b-4cf1-b661-5e014d798278-ovs-socket\") pod \"nmstate-handler-bvbmb\" (UID: \"30e67463-395b-4cf1-b661-5e014d798278\") " pod="openshift-nmstate/nmstate-handler-bvbmb" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.546822 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/30e67463-395b-4cf1-b661-5e014d798278-nmstate-lock\") pod \"nmstate-handler-bvbmb\" (UID: \"30e67463-395b-4cf1-b661-5e014d798278\") " pod="openshift-nmstate/nmstate-handler-bvbmb" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.546966 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/30e67463-395b-4cf1-b661-5e014d798278-nmstate-lock\") pod \"nmstate-handler-bvbmb\" (UID: \"30e67463-395b-4cf1-b661-5e014d798278\") " pod="openshift-nmstate/nmstate-handler-bvbmb" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.547537 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: 
\"kubernetes.io/host-path/30e67463-395b-4cf1-b661-5e014d798278-dbus-socket\") pod \"nmstate-handler-bvbmb\" (UID: \"30e67463-395b-4cf1-b661-5e014d798278\") " pod="openshift-nmstate/nmstate-handler-bvbmb" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.547573 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/30e67463-395b-4cf1-b661-5e014d798278-ovs-socket\") pod \"nmstate-handler-bvbmb\" (UID: \"30e67463-395b-4cf1-b661-5e014d798278\") " pod="openshift-nmstate/nmstate-handler-bvbmb" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.567446 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-cgxpv" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.577164 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.577867 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.578159 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-jb82h" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.599257 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp"] Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.603334 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjkj9\" (UniqueName: \"kubernetes.io/projected/30e67463-395b-4cf1-b661-5e014d798278-kube-api-access-vjkj9\") pod \"nmstate-handler-bvbmb\" (UID: \"30e67463-395b-4cf1-b661-5e014d798278\") " pod="openshift-nmstate/nmstate-handler-bvbmb" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.648893 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqckr\" (UniqueName: \"kubernetes.io/projected/56c22478-8327-4b18-ad6a-4caea5d25aad-kube-api-access-wqckr\") pod \"nmstate-console-plugin-7754f76f8b-kwqrp\" (UID: \"56c22478-8327-4b18-ad6a-4caea5d25aad\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.648978 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/56c22478-8327-4b18-ad6a-4caea5d25aad-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-kwqrp\" (UID: \"56c22478-8327-4b18-ad6a-4caea5d25aad\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.649025 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/56c22478-8327-4b18-ad6a-4caea5d25aad-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-kwqrp\" (UID: \"56c22478-8327-4b18-ad6a-4caea5d25aad\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.663900 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-bvbmb" Jan 28 16:52:05 crc kubenswrapper[4877]: W0128 16:52:05.736601 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod30e67463_395b_4cf1_b661_5e014d798278.slice/crio-247600b807106aa57bb20aa38a1690f0b49c4a625d5ae2b40e5cadba2b65c97e WatchSource:0}: Error finding container 247600b807106aa57bb20aa38a1690f0b49c4a625d5ae2b40e5cadba2b65c97e: Status 404 returned error can't find the container with id 247600b807106aa57bb20aa38a1690f0b49c4a625d5ae2b40e5cadba2b65c97e Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.759670 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqckr\" (UniqueName: \"kubernetes.io/projected/56c22478-8327-4b18-ad6a-4caea5d25aad-kube-api-access-wqckr\") pod \"nmstate-console-plugin-7754f76f8b-kwqrp\" (UID: \"56c22478-8327-4b18-ad6a-4caea5d25aad\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp" Jan 28 16:52:05 crc kubenswrapper[4877]: E0128 16:52:05.762264 4877 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Jan 28 16:52:05 crc kubenswrapper[4877]: E0128 16:52:05.762371 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/56c22478-8327-4b18-ad6a-4caea5d25aad-plugin-serving-cert podName:56c22478-8327-4b18-ad6a-4caea5d25aad nodeName:}" failed. No retries permitted until 2026-01-28 16:52:06.26234009 +0000 UTC m=+1029.820666978 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/56c22478-8327-4b18-ad6a-4caea5d25aad-plugin-serving-cert") pod "nmstate-console-plugin-7754f76f8b-kwqrp" (UID: "56c22478-8327-4b18-ad6a-4caea5d25aad") : secret "plugin-serving-cert" not found Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.762836 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/56c22478-8327-4b18-ad6a-4caea5d25aad-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-kwqrp\" (UID: \"56c22478-8327-4b18-ad6a-4caea5d25aad\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.763961 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/56c22478-8327-4b18-ad6a-4caea5d25aad-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-kwqrp\" (UID: \"56c22478-8327-4b18-ad6a-4caea5d25aad\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.769224 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-5dbb47b56f-hd7gx"] Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.769325 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/56c22478-8327-4b18-ad6a-4caea5d25aad-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-kwqrp\" (UID: \"56c22478-8327-4b18-ad6a-4caea5d25aad\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.770933 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.788550 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqckr\" (UniqueName: \"kubernetes.io/projected/56c22478-8327-4b18-ad6a-4caea5d25aad-kube-api-access-wqckr\") pod \"nmstate-console-plugin-7754f76f8b-kwqrp\" (UID: \"56c22478-8327-4b18-ad6a-4caea5d25aad\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.794777 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-bvbmb" event={"ID":"30e67463-395b-4cf1-b661-5e014d798278","Type":"ContainerStarted","Data":"247600b807106aa57bb20aa38a1690f0b49c4a625d5ae2b40e5cadba2b65c97e"} Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.800298 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5dbb47b56f-hd7gx"] Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.873344 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-oauth-serving-cert\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.873463 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-trusted-ca-bundle\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.873534 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-oauth-config\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.873605 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-config\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.873665 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6255\" (UniqueName: \"kubernetes.io/projected/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-kube-api-access-f6255\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.873779 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-service-ca\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.873810 4877 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-serving-cert\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.976301 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-trusted-ca-bundle\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.976369 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-oauth-config\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.976428 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-config\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.976454 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6255\" (UniqueName: \"kubernetes.io/projected/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-kube-api-access-f6255\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.976548 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/74e879cb-e2db-473a-9248-112f04f25fa5-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-mh62n\" (UID: \"74e879cb-e2db-473a-9248-112f04f25fa5\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.976598 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-service-ca\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.976627 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-serving-cert\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.976692 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-oauth-serving-cert\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.977515 4877 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-config\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.977535 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-oauth-serving-cert\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.978416 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-service-ca\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.981017 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-trusted-ca-bundle\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.982272 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-oauth-config\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.984779 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-serving-cert\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.986647 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/74e879cb-e2db-473a-9248-112f04f25fa5-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-mh62n\" (UID: \"74e879cb-e2db-473a-9248-112f04f25fa5\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n" Jan 28 16:52:05 crc kubenswrapper[4877]: I0128 16:52:05.996000 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6255\" (UniqueName: \"kubernetes.io/projected/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-kube-api-access-f6255\") pod \"console-5dbb47b56f-hd7gx\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:06 crc kubenswrapper[4877]: I0128 16:52:06.093047 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:06 crc kubenswrapper[4877]: I0128 16:52:06.203122 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n" Jan 28 16:52:06 crc kubenswrapper[4877]: I0128 16:52:06.286513 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/56c22478-8327-4b18-ad6a-4caea5d25aad-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-kwqrp\" (UID: \"56c22478-8327-4b18-ad6a-4caea5d25aad\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp" Jan 28 16:52:06 crc kubenswrapper[4877]: I0128 16:52:06.294214 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/56c22478-8327-4b18-ad6a-4caea5d25aad-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-kwqrp\" (UID: \"56c22478-8327-4b18-ad6a-4caea5d25aad\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp" Jan 28 16:52:06 crc kubenswrapper[4877]: I0128 16:52:06.298627 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp" Jan 28 16:52:06 crc kubenswrapper[4877]: I0128 16:52:06.316944 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-cgxpv"] Jan 28 16:52:06 crc kubenswrapper[4877]: I0128 16:52:06.573539 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5dbb47b56f-hd7gx"] Jan 28 16:52:06 crc kubenswrapper[4877]: I0128 16:52:06.675909 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp"] Jan 28 16:52:06 crc kubenswrapper[4877]: W0128 16:52:06.685247 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56c22478_8327_4b18_ad6a_4caea5d25aad.slice/crio-24114082cfecd03202dd38d2166f12a419e514781be8f6282490e6fccb5f2777 WatchSource:0}: Error finding container 24114082cfecd03202dd38d2166f12a419e514781be8f6282490e6fccb5f2777: Status 404 returned error can't find the container with id 24114082cfecd03202dd38d2166f12a419e514781be8f6282490e6fccb5f2777 Jan 28 16:52:06 crc kubenswrapper[4877]: I0128 16:52:06.729990 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n"] Jan 28 16:52:06 crc kubenswrapper[4877]: I0128 16:52:06.807514 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n" event={"ID":"74e879cb-e2db-473a-9248-112f04f25fa5","Type":"ContainerStarted","Data":"8f7af13e7e9cef12cea5121aa526f3efd45d18f8d3492ff6af39a2c1c32a980d"} Jan 28 16:52:06 crc kubenswrapper[4877]: I0128 16:52:06.809417 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp" event={"ID":"56c22478-8327-4b18-ad6a-4caea5d25aad","Type":"ContainerStarted","Data":"24114082cfecd03202dd38d2166f12a419e514781be8f6282490e6fccb5f2777"} Jan 28 16:52:06 crc kubenswrapper[4877]: I0128 16:52:06.812107 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-cgxpv" event={"ID":"49b04356-7b50-425a-850c-984244a0c38b","Type":"ContainerStarted","Data":"d212daf3aa350fb4bdc698de14ded7e017b0f03a37667635930e89a2253c6620"} Jan 28 16:52:06 crc kubenswrapper[4877]: I0128 16:52:06.814459 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5dbb47b56f-hd7gx" 
event={"ID":"d3001a64-58f6-4e84-bf0a-fa4f5889ecef","Type":"ContainerStarted","Data":"a561fe02e6e73ad39a8b019daa59f68e508489df916fa2458e914548631defcc"} Jan 28 16:52:06 crc kubenswrapper[4877]: I0128 16:52:06.814521 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5dbb47b56f-hd7gx" event={"ID":"d3001a64-58f6-4e84-bf0a-fa4f5889ecef","Type":"ContainerStarted","Data":"4db7c6ea2faf81099b4c20dcc99aa91cbcebea3968f8b19fc1154f7344074860"} Jan 28 16:52:06 crc kubenswrapper[4877]: I0128 16:52:06.848448 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-5dbb47b56f-hd7gx" podStartSLOduration=1.848417593 podStartE2EDuration="1.848417593s" podCreationTimestamp="2026-01-28 16:52:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:52:06.834377502 +0000 UTC m=+1030.392704410" watchObservedRunningTime="2026-01-28 16:52:06.848417593 +0000 UTC m=+1030.406744481" Jan 28 16:52:09 crc kubenswrapper[4877]: I0128 16:52:09.887904 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n" event={"ID":"74e879cb-e2db-473a-9248-112f04f25fa5","Type":"ContainerStarted","Data":"32e44ba4d9a155f93a58b1803226af065eae6be1f3ee532786cdcc2e86fa4c67"} Jan 28 16:52:09 crc kubenswrapper[4877]: I0128 16:52:09.888814 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n" Jan 28 16:52:09 crc kubenswrapper[4877]: I0128 16:52:09.893757 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp" event={"ID":"56c22478-8327-4b18-ad6a-4caea5d25aad","Type":"ContainerStarted","Data":"8fa5b3a9f1434f5c4acca74200c89b4d7c433e8c6ee6dc6376bee2db4a75ff72"} Jan 28 16:52:09 crc kubenswrapper[4877]: I0128 16:52:09.900033 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-cgxpv" event={"ID":"49b04356-7b50-425a-850c-984244a0c38b","Type":"ContainerStarted","Data":"ba869afdfb9a2d7994fe402db4de9c6cd298daf6e95b66582e9f7084a0edb3d4"} Jan 28 16:52:09 crc kubenswrapper[4877]: I0128 16:52:09.909207 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-bvbmb" Jan 28 16:52:09 crc kubenswrapper[4877]: I0128 16:52:09.921444 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n" podStartSLOduration=2.193851984 podStartE2EDuration="4.921422808s" podCreationTimestamp="2026-01-28 16:52:05 +0000 UTC" firstStartedPulling="2026-01-28 16:52:06.735595393 +0000 UTC m=+1030.293922281" lastFinishedPulling="2026-01-28 16:52:09.463166217 +0000 UTC m=+1033.021493105" observedRunningTime="2026-01-28 16:52:09.921047318 +0000 UTC m=+1033.479374206" watchObservedRunningTime="2026-01-28 16:52:09.921422808 +0000 UTC m=+1033.479749696" Jan 28 16:52:09 crc kubenswrapper[4877]: I0128 16:52:09.941620 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-kwqrp" podStartSLOduration=2.179019952 podStartE2EDuration="4.941605556s" podCreationTimestamp="2026-01-28 16:52:05 +0000 UTC" firstStartedPulling="2026-01-28 16:52:06.688394862 +0000 UTC m=+1030.246721750" lastFinishedPulling="2026-01-28 16:52:09.450980466 +0000 UTC m=+1033.009307354" observedRunningTime="2026-01-28 
16:52:09.939725695 +0000 UTC m=+1033.498052593" watchObservedRunningTime="2026-01-28 16:52:09.941605556 +0000 UTC m=+1033.499932444" Jan 28 16:52:09 crc kubenswrapper[4877]: I0128 16:52:09.979143 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-bvbmb" podStartSLOduration=1.2718635520000001 podStartE2EDuration="4.979115403s" podCreationTimestamp="2026-01-28 16:52:05 +0000 UTC" firstStartedPulling="2026-01-28 16:52:05.74353837 +0000 UTC m=+1029.301865258" lastFinishedPulling="2026-01-28 16:52:09.450790221 +0000 UTC m=+1033.009117109" observedRunningTime="2026-01-28 16:52:09.970334125 +0000 UTC m=+1033.528661013" watchObservedRunningTime="2026-01-28 16:52:09.979115403 +0000 UTC m=+1033.537442291" Jan 28 16:52:10 crc kubenswrapper[4877]: I0128 16:52:10.921280 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-bvbmb" event={"ID":"30e67463-395b-4cf1-b661-5e014d798278","Type":"ContainerStarted","Data":"4b6fa5bb992a4782e97d46ab06aad73061016a6b33510a859e0919f91d75893a"} Jan 28 16:52:12 crc kubenswrapper[4877]: I0128 16:52:12.946737 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-cgxpv" event={"ID":"49b04356-7b50-425a-850c-984244a0c38b","Type":"ContainerStarted","Data":"9308fb3244a5d4078bdcf9f8078ddff725ce433edadfce49ba3d033aabadddf0"} Jan 28 16:52:12 crc kubenswrapper[4877]: I0128 16:52:12.981462 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-cgxpv" podStartSLOduration=2.030727909 podStartE2EDuration="7.981438631s" podCreationTimestamp="2026-01-28 16:52:05 +0000 UTC" firstStartedPulling="2026-01-28 16:52:06.331582872 +0000 UTC m=+1029.889909760" lastFinishedPulling="2026-01-28 16:52:12.282293594 +0000 UTC m=+1035.840620482" observedRunningTime="2026-01-28 16:52:12.974793871 +0000 UTC m=+1036.533120789" watchObservedRunningTime="2026-01-28 16:52:12.981438631 +0000 UTC m=+1036.539765519" Jan 28 16:52:15 crc kubenswrapper[4877]: I0128 16:52:15.704561 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-bvbmb" Jan 28 16:52:16 crc kubenswrapper[4877]: I0128 16:52:16.093892 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:16 crc kubenswrapper[4877]: I0128 16:52:16.093961 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:16 crc kubenswrapper[4877]: I0128 16:52:16.101079 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:16 crc kubenswrapper[4877]: I0128 16:52:16.993783 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:52:17 crc kubenswrapper[4877]: I0128 16:52:17.066962 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-5df55ff997-m9q7p"] Jan 28 16:52:26 crc kubenswrapper[4877]: I0128 16:52:26.210373 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.117247 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-5df55ff997-m9q7p" 
podUID="e6b64fae-3c03-438f-a673-28a924ce0f6d" containerName="console" containerID="cri-o://3828c43525f16efed913d692131b709adf9bc1f1ed300a6475030c1e08ae8751" gracePeriod=15 Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.352281 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5df55ff997-m9q7p_e6b64fae-3c03-438f-a673-28a924ce0f6d/console/0.log" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.352644 4877 generic.go:334] "Generic (PLEG): container finished" podID="e6b64fae-3c03-438f-a673-28a924ce0f6d" containerID="3828c43525f16efed913d692131b709adf9bc1f1ed300a6475030c1e08ae8751" exitCode=2 Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.352687 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5df55ff997-m9q7p" event={"ID":"e6b64fae-3c03-438f-a673-28a924ce0f6d","Type":"ContainerDied","Data":"3828c43525f16efed913d692131b709adf9bc1f1ed300a6475030c1e08ae8751"} Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.583692 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5df55ff997-m9q7p_e6b64fae-3c03-438f-a673-28a924ce0f6d/console/0.log" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.584090 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.718047 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-trusted-ca-bundle\") pod \"e6b64fae-3c03-438f-a673-28a924ce0f6d\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.718155 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-serving-cert\") pod \"e6b64fae-3c03-438f-a673-28a924ce0f6d\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.718307 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-service-ca\") pod \"e6b64fae-3c03-438f-a673-28a924ce0f6d\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.718362 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kkvzw\" (UniqueName: \"kubernetes.io/projected/e6b64fae-3c03-438f-a673-28a924ce0f6d-kube-api-access-kkvzw\") pod \"e6b64fae-3c03-438f-a673-28a924ce0f6d\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.718401 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-oauth-serving-cert\") pod \"e6b64fae-3c03-438f-a673-28a924ce0f6d\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.718419 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-config\") pod \"e6b64fae-3c03-438f-a673-28a924ce0f6d\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " Jan 28 16:52:42 crc 
kubenswrapper[4877]: I0128 16:52:42.718592 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-oauth-config\") pod \"e6b64fae-3c03-438f-a673-28a924ce0f6d\" (UID: \"e6b64fae-3c03-438f-a673-28a924ce0f6d\") " Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.718998 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "e6b64fae-3c03-438f-a673-28a924ce0f6d" (UID: "e6b64fae-3c03-438f-a673-28a924ce0f6d"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.720066 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-config" (OuterVolumeSpecName: "console-config") pod "e6b64fae-3c03-438f-a673-28a924ce0f6d" (UID: "e6b64fae-3c03-438f-a673-28a924ce0f6d"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.720258 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "e6b64fae-3c03-438f-a673-28a924ce0f6d" (UID: "e6b64fae-3c03-438f-a673-28a924ce0f6d"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.720667 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-service-ca" (OuterVolumeSpecName: "service-ca") pod "e6b64fae-3c03-438f-a673-28a924ce0f6d" (UID: "e6b64fae-3c03-438f-a673-28a924ce0f6d"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.725343 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "e6b64fae-3c03-438f-a673-28a924ce0f6d" (UID: "e6b64fae-3c03-438f-a673-28a924ce0f6d"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.725556 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "e6b64fae-3c03-438f-a673-28a924ce0f6d" (UID: "e6b64fae-3c03-438f-a673-28a924ce0f6d"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.731433 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6b64fae-3c03-438f-a673-28a924ce0f6d-kube-api-access-kkvzw" (OuterVolumeSpecName: "kube-api-access-kkvzw") pod "e6b64fae-3c03-438f-a673-28a924ce0f6d" (UID: "e6b64fae-3c03-438f-a673-28a924ce0f6d"). InnerVolumeSpecName "kube-api-access-kkvzw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.821851 4877 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.821898 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kkvzw\" (UniqueName: \"kubernetes.io/projected/e6b64fae-3c03-438f-a673-28a924ce0f6d-kube-api-access-kkvzw\") on node \"crc\" DevicePath \"\"" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.821913 4877 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.821922 4877 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.821936 4877 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.821946 4877 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e6b64fae-3c03-438f-a673-28a924ce0f6d-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:52:42 crc kubenswrapper[4877]: I0128 16:52:42.821958 4877 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e6b64fae-3c03-438f-a673-28a924ce0f6d-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:52:43 crc kubenswrapper[4877]: I0128 16:52:43.369249 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5df55ff997-m9q7p_e6b64fae-3c03-438f-a673-28a924ce0f6d/console/0.log" Jan 28 16:52:43 crc kubenswrapper[4877]: I0128 16:52:43.369330 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5df55ff997-m9q7p" event={"ID":"e6b64fae-3c03-438f-a673-28a924ce0f6d","Type":"ContainerDied","Data":"91c4668587a88550a83b91f017bd67d1e443bb46916b7116cdd9f182dd7893b1"} Jan 28 16:52:43 crc kubenswrapper[4877]: I0128 16:52:43.369385 4877 scope.go:117] "RemoveContainer" containerID="3828c43525f16efed913d692131b709adf9bc1f1ed300a6475030c1e08ae8751" Jan 28 16:52:43 crc kubenswrapper[4877]: I0128 16:52:43.369661 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5df55ff997-m9q7p" Jan 28 16:52:43 crc kubenswrapper[4877]: I0128 16:52:43.413567 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-5df55ff997-m9q7p"] Jan 28 16:52:43 crc kubenswrapper[4877]: I0128 16:52:43.419821 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-5df55ff997-m9q7p"] Jan 28 16:52:45 crc kubenswrapper[4877]: I0128 16:52:45.339494 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6b64fae-3c03-438f-a673-28a924ce0f6d" path="/var/lib/kubelet/pods/e6b64fae-3c03-438f-a673-28a924ce0f6d/volumes" Jan 28 16:52:45 crc kubenswrapper[4877]: I0128 16:52:45.728123 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc"] Jan 28 16:52:45 crc kubenswrapper[4877]: E0128 16:52:45.728542 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6b64fae-3c03-438f-a673-28a924ce0f6d" containerName="console" Jan 28 16:52:45 crc kubenswrapper[4877]: I0128 16:52:45.728558 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6b64fae-3c03-438f-a673-28a924ce0f6d" containerName="console" Jan 28 16:52:45 crc kubenswrapper[4877]: I0128 16:52:45.728742 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6b64fae-3c03-438f-a673-28a924ce0f6d" containerName="console" Jan 28 16:52:45 crc kubenswrapper[4877]: I0128 16:52:45.730190 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" Jan 28 16:52:45 crc kubenswrapper[4877]: I0128 16:52:45.732685 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 28 16:52:45 crc kubenswrapper[4877]: I0128 16:52:45.736656 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc"] Jan 28 16:52:45 crc kubenswrapper[4877]: I0128 16:52:45.887882 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vt55n\" (UniqueName: \"kubernetes.io/projected/9176a964-8479-4b7f-a4bc-04c79625a736-kube-api-access-vt55n\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc\" (UID: \"9176a964-8479-4b7f-a4bc-04c79625a736\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" Jan 28 16:52:45 crc kubenswrapper[4877]: I0128 16:52:45.888223 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9176a964-8479-4b7f-a4bc-04c79625a736-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc\" (UID: \"9176a964-8479-4b7f-a4bc-04c79625a736\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" Jan 28 16:52:45 crc kubenswrapper[4877]: I0128 16:52:45.888278 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9176a964-8479-4b7f-a4bc-04c79625a736-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc\" (UID: \"9176a964-8479-4b7f-a4bc-04c79625a736\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" Jan 28 16:52:45 crc kubenswrapper[4877]: 
I0128 16:52:45.989845 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9176a964-8479-4b7f-a4bc-04c79625a736-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc\" (UID: \"9176a964-8479-4b7f-a4bc-04c79625a736\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" Jan 28 16:52:45 crc kubenswrapper[4877]: I0128 16:52:45.989942 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9176a964-8479-4b7f-a4bc-04c79625a736-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc\" (UID: \"9176a964-8479-4b7f-a4bc-04c79625a736\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" Jan 28 16:52:45 crc kubenswrapper[4877]: I0128 16:52:45.990000 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vt55n\" (UniqueName: \"kubernetes.io/projected/9176a964-8479-4b7f-a4bc-04c79625a736-kube-api-access-vt55n\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc\" (UID: \"9176a964-8479-4b7f-a4bc-04c79625a736\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" Jan 28 16:52:45 crc kubenswrapper[4877]: I0128 16:52:45.990616 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9176a964-8479-4b7f-a4bc-04c79625a736-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc\" (UID: \"9176a964-8479-4b7f-a4bc-04c79625a736\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" Jan 28 16:52:45 crc kubenswrapper[4877]: I0128 16:52:45.990946 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9176a964-8479-4b7f-a4bc-04c79625a736-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc\" (UID: \"9176a964-8479-4b7f-a4bc-04c79625a736\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" Jan 28 16:52:46 crc kubenswrapper[4877]: I0128 16:52:46.009791 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vt55n\" (UniqueName: \"kubernetes.io/projected/9176a964-8479-4b7f-a4bc-04c79625a736-kube-api-access-vt55n\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc\" (UID: \"9176a964-8479-4b7f-a4bc-04c79625a736\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" Jan 28 16:52:46 crc kubenswrapper[4877]: I0128 16:52:46.051816 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" Jan 28 16:52:46 crc kubenswrapper[4877]: I0128 16:52:46.585939 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc"] Jan 28 16:52:47 crc kubenswrapper[4877]: I0128 16:52:47.407903 4877 generic.go:334] "Generic (PLEG): container finished" podID="9176a964-8479-4b7f-a4bc-04c79625a736" containerID="e112dcc36ca8ab0f9ddefad86973fb6fd44fe63f00e12edecf95e0de5998fd29" exitCode=0 Jan 28 16:52:47 crc kubenswrapper[4877]: I0128 16:52:47.408018 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" event={"ID":"9176a964-8479-4b7f-a4bc-04c79625a736","Type":"ContainerDied","Data":"e112dcc36ca8ab0f9ddefad86973fb6fd44fe63f00e12edecf95e0de5998fd29"} Jan 28 16:52:47 crc kubenswrapper[4877]: I0128 16:52:47.408197 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" event={"ID":"9176a964-8479-4b7f-a4bc-04c79625a736","Type":"ContainerStarted","Data":"5cd3f3f4ceee449b5fe0f87515c83e4dcb2ec705461482b0f682ff5a4cb2b4c4"} Jan 28 16:52:49 crc kubenswrapper[4877]: I0128 16:52:49.428430 4877 generic.go:334] "Generic (PLEG): container finished" podID="9176a964-8479-4b7f-a4bc-04c79625a736" containerID="7667a2b257d2ca5ab492c92489afe771af501ad47bacbd54fd2fe75b30cd4c5a" exitCode=0 Jan 28 16:52:49 crc kubenswrapper[4877]: I0128 16:52:49.428543 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" event={"ID":"9176a964-8479-4b7f-a4bc-04c79625a736","Type":"ContainerDied","Data":"7667a2b257d2ca5ab492c92489afe771af501ad47bacbd54fd2fe75b30cd4c5a"} Jan 28 16:52:50 crc kubenswrapper[4877]: I0128 16:52:50.442383 4877 generic.go:334] "Generic (PLEG): container finished" podID="9176a964-8479-4b7f-a4bc-04c79625a736" containerID="d5e378e7f97db8106346df71ca59763a19f4b9daf8aa11194dc47f4123b58a02" exitCode=0 Jan 28 16:52:50 crc kubenswrapper[4877]: I0128 16:52:50.442540 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" event={"ID":"9176a964-8479-4b7f-a4bc-04c79625a736","Type":"ContainerDied","Data":"d5e378e7f97db8106346df71ca59763a19f4b9daf8aa11194dc47f4123b58a02"} Jan 28 16:52:51 crc kubenswrapper[4877]: I0128 16:52:51.795334 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" Jan 28 16:52:51 crc kubenswrapper[4877]: I0128 16:52:51.923801 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9176a964-8479-4b7f-a4bc-04c79625a736-util\") pod \"9176a964-8479-4b7f-a4bc-04c79625a736\" (UID: \"9176a964-8479-4b7f-a4bc-04c79625a736\") " Jan 28 16:52:51 crc kubenswrapper[4877]: I0128 16:52:51.923869 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9176a964-8479-4b7f-a4bc-04c79625a736-bundle\") pod \"9176a964-8479-4b7f-a4bc-04c79625a736\" (UID: \"9176a964-8479-4b7f-a4bc-04c79625a736\") " Jan 28 16:52:51 crc kubenswrapper[4877]: I0128 16:52:51.926551 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9176a964-8479-4b7f-a4bc-04c79625a736-bundle" (OuterVolumeSpecName: "bundle") pod "9176a964-8479-4b7f-a4bc-04c79625a736" (UID: "9176a964-8479-4b7f-a4bc-04c79625a736"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:52:51 crc kubenswrapper[4877]: I0128 16:52:51.923942 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt55n\" (UniqueName: \"kubernetes.io/projected/9176a964-8479-4b7f-a4bc-04c79625a736-kube-api-access-vt55n\") pod \"9176a964-8479-4b7f-a4bc-04c79625a736\" (UID: \"9176a964-8479-4b7f-a4bc-04c79625a736\") " Jan 28 16:52:51 crc kubenswrapper[4877]: I0128 16:52:51.927539 4877 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9176a964-8479-4b7f-a4bc-04c79625a736-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:52:51 crc kubenswrapper[4877]: I0128 16:52:51.941763 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9176a964-8479-4b7f-a4bc-04c79625a736-kube-api-access-vt55n" (OuterVolumeSpecName: "kube-api-access-vt55n") pod "9176a964-8479-4b7f-a4bc-04c79625a736" (UID: "9176a964-8479-4b7f-a4bc-04c79625a736"). InnerVolumeSpecName "kube-api-access-vt55n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:52:51 crc kubenswrapper[4877]: I0128 16:52:51.957231 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9176a964-8479-4b7f-a4bc-04c79625a736-util" (OuterVolumeSpecName: "util") pod "9176a964-8479-4b7f-a4bc-04c79625a736" (UID: "9176a964-8479-4b7f-a4bc-04c79625a736"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:52:52 crc kubenswrapper[4877]: I0128 16:52:52.029637 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt55n\" (UniqueName: \"kubernetes.io/projected/9176a964-8479-4b7f-a4bc-04c79625a736-kube-api-access-vt55n\") on node \"crc\" DevicePath \"\"" Jan 28 16:52:52 crc kubenswrapper[4877]: I0128 16:52:52.029675 4877 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9176a964-8479-4b7f-a4bc-04c79625a736-util\") on node \"crc\" DevicePath \"\"" Jan 28 16:52:52 crc kubenswrapper[4877]: I0128 16:52:52.468312 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" event={"ID":"9176a964-8479-4b7f-a4bc-04c79625a736","Type":"ContainerDied","Data":"5cd3f3f4ceee449b5fe0f87515c83e4dcb2ec705461482b0f682ff5a4cb2b4c4"} Jan 28 16:52:52 crc kubenswrapper[4877]: I0128 16:52:52.468364 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5cd3f3f4ceee449b5fe0f87515c83e4dcb2ec705461482b0f682ff5a4cb2b4c4" Jan 28 16:52:52 crc kubenswrapper[4877]: I0128 16:52:52.468426 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcsckfc" Jan 28 16:53:01 crc kubenswrapper[4877]: I0128 16:53:01.817543 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv"] Jan 28 16:53:01 crc kubenswrapper[4877]: E0128 16:53:01.818755 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9176a964-8479-4b7f-a4bc-04c79625a736" containerName="extract" Jan 28 16:53:01 crc kubenswrapper[4877]: I0128 16:53:01.818775 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="9176a964-8479-4b7f-a4bc-04c79625a736" containerName="extract" Jan 28 16:53:01 crc kubenswrapper[4877]: E0128 16:53:01.818794 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9176a964-8479-4b7f-a4bc-04c79625a736" containerName="util" Jan 28 16:53:01 crc kubenswrapper[4877]: I0128 16:53:01.818801 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="9176a964-8479-4b7f-a4bc-04c79625a736" containerName="util" Jan 28 16:53:01 crc kubenswrapper[4877]: E0128 16:53:01.818828 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9176a964-8479-4b7f-a4bc-04c79625a736" containerName="pull" Jan 28 16:53:01 crc kubenswrapper[4877]: I0128 16:53:01.818837 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="9176a964-8479-4b7f-a4bc-04c79625a736" containerName="pull" Jan 28 16:53:01 crc kubenswrapper[4877]: I0128 16:53:01.819017 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="9176a964-8479-4b7f-a4bc-04c79625a736" containerName="extract" Jan 28 16:53:01 crc kubenswrapper[4877]: I0128 16:53:01.819856 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv" Jan 28 16:53:01 crc kubenswrapper[4877]: I0128 16:53:01.823689 4877 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 28 16:53:01 crc kubenswrapper[4877]: I0128 16:53:01.824313 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 28 16:53:01 crc kubenswrapper[4877]: I0128 16:53:01.824379 4877 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-5sp74" Jan 28 16:53:01 crc kubenswrapper[4877]: I0128 16:53:01.829675 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 28 16:53:01 crc kubenswrapper[4877]: I0128 16:53:01.838720 4877 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 28 16:53:01 crc kubenswrapper[4877]: I0128 16:53:01.855569 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv"] Jan 28 16:53:01 crc kubenswrapper[4877]: I0128 16:53:01.942639 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7fa1ce1f-9222-4147-a89f-9d621b1afc80-apiservice-cert\") pod \"metallb-operator-controller-manager-7bd999d8bc-qpskv\" (UID: \"7fa1ce1f-9222-4147-a89f-9d621b1afc80\") " pod="metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv" Jan 28 16:53:01 crc kubenswrapper[4877]: I0128 16:53:01.943056 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7fa1ce1f-9222-4147-a89f-9d621b1afc80-webhook-cert\") pod \"metallb-operator-controller-manager-7bd999d8bc-qpskv\" (UID: \"7fa1ce1f-9222-4147-a89f-9d621b1afc80\") " pod="metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv" Jan 28 16:53:01 crc kubenswrapper[4877]: I0128 16:53:01.943078 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v555s\" (UniqueName: \"kubernetes.io/projected/7fa1ce1f-9222-4147-a89f-9d621b1afc80-kube-api-access-v555s\") pod \"metallb-operator-controller-manager-7bd999d8bc-qpskv\" (UID: \"7fa1ce1f-9222-4147-a89f-9d621b1afc80\") " pod="metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.044593 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7fa1ce1f-9222-4147-a89f-9d621b1afc80-apiservice-cert\") pod \"metallb-operator-controller-manager-7bd999d8bc-qpskv\" (UID: \"7fa1ce1f-9222-4147-a89f-9d621b1afc80\") " pod="metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.045949 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7fa1ce1f-9222-4147-a89f-9d621b1afc80-webhook-cert\") pod \"metallb-operator-controller-manager-7bd999d8bc-qpskv\" (UID: \"7fa1ce1f-9222-4147-a89f-9d621b1afc80\") " pod="metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.045980 4877 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v555s\" (UniqueName: \"kubernetes.io/projected/7fa1ce1f-9222-4147-a89f-9d621b1afc80-kube-api-access-v555s\") pod \"metallb-operator-controller-manager-7bd999d8bc-qpskv\" (UID: \"7fa1ce1f-9222-4147-a89f-9d621b1afc80\") " pod="metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.055639 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7fa1ce1f-9222-4147-a89f-9d621b1afc80-webhook-cert\") pod \"metallb-operator-controller-manager-7bd999d8bc-qpskv\" (UID: \"7fa1ce1f-9222-4147-a89f-9d621b1afc80\") " pod="metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.057256 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7fa1ce1f-9222-4147-a89f-9d621b1afc80-apiservice-cert\") pod \"metallb-operator-controller-manager-7bd999d8bc-qpskv\" (UID: \"7fa1ce1f-9222-4147-a89f-9d621b1afc80\") " pod="metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.066852 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v555s\" (UniqueName: \"kubernetes.io/projected/7fa1ce1f-9222-4147-a89f-9d621b1afc80-kube-api-access-v555s\") pod \"metallb-operator-controller-manager-7bd999d8bc-qpskv\" (UID: \"7fa1ce1f-9222-4147-a89f-9d621b1afc80\") " pod="metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.154100 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.262259 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r"] Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.264102 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.267559 4877 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.267731 4877 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-f4vd8" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.269914 4877 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.293869 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r"] Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.350875 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/59f588be-7008-4941-a210-ba17edc1ff30-webhook-cert\") pod \"metallb-operator-webhook-server-9b8bf7874-4g47r\" (UID: \"59f588be-7008-4941-a210-ba17edc1ff30\") " pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.351284 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnrvh\" (UniqueName: \"kubernetes.io/projected/59f588be-7008-4941-a210-ba17edc1ff30-kube-api-access-qnrvh\") pod \"metallb-operator-webhook-server-9b8bf7874-4g47r\" (UID: \"59f588be-7008-4941-a210-ba17edc1ff30\") " pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.351317 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/59f588be-7008-4941-a210-ba17edc1ff30-apiservice-cert\") pod \"metallb-operator-webhook-server-9b8bf7874-4g47r\" (UID: \"59f588be-7008-4941-a210-ba17edc1ff30\") " pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.452613 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnrvh\" (UniqueName: \"kubernetes.io/projected/59f588be-7008-4941-a210-ba17edc1ff30-kube-api-access-qnrvh\") pod \"metallb-operator-webhook-server-9b8bf7874-4g47r\" (UID: \"59f588be-7008-4941-a210-ba17edc1ff30\") " pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.452676 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/59f588be-7008-4941-a210-ba17edc1ff30-apiservice-cert\") pod \"metallb-operator-webhook-server-9b8bf7874-4g47r\" (UID: \"59f588be-7008-4941-a210-ba17edc1ff30\") " pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.452800 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/59f588be-7008-4941-a210-ba17edc1ff30-webhook-cert\") pod \"metallb-operator-webhook-server-9b8bf7874-4g47r\" (UID: \"59f588be-7008-4941-a210-ba17edc1ff30\") " pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.457869 4877 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/59f588be-7008-4941-a210-ba17edc1ff30-webhook-cert\") pod \"metallb-operator-webhook-server-9b8bf7874-4g47r\" (UID: \"59f588be-7008-4941-a210-ba17edc1ff30\") " pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.474129 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/59f588be-7008-4941-a210-ba17edc1ff30-apiservice-cert\") pod \"metallb-operator-webhook-server-9b8bf7874-4g47r\" (UID: \"59f588be-7008-4941-a210-ba17edc1ff30\") " pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.476615 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnrvh\" (UniqueName: \"kubernetes.io/projected/59f588be-7008-4941-a210-ba17edc1ff30-kube-api-access-qnrvh\") pod \"metallb-operator-webhook-server-9b8bf7874-4g47r\" (UID: \"59f588be-7008-4941-a210-ba17edc1ff30\") " pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.589755 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" Jan 28 16:53:02 crc kubenswrapper[4877]: I0128 16:53:02.758286 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv"] Jan 28 16:53:03 crc kubenswrapper[4877]: I0128 16:53:03.130998 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r"] Jan 28 16:53:03 crc kubenswrapper[4877]: I0128 16:53:03.566656 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" event={"ID":"59f588be-7008-4941-a210-ba17edc1ff30","Type":"ContainerStarted","Data":"f944f2a83969de92053aa20eccce217de09f9bf59e7b377f60445a4510018f39"} Jan 28 16:53:03 crc kubenswrapper[4877]: I0128 16:53:03.569092 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv" event={"ID":"7fa1ce1f-9222-4147-a89f-9d621b1afc80","Type":"ContainerStarted","Data":"32e5139bcb15d93de67745cbc9667dfae2a6a4637b5265639cad77c1c7915e33"} Jan 28 16:53:07 crc kubenswrapper[4877]: I0128 16:53:07.076577 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:53:07 crc kubenswrapper[4877]: I0128 16:53:07.077135 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:53:09 crc kubenswrapper[4877]: I0128 16:53:09.625829 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" 
event={"ID":"59f588be-7008-4941-a210-ba17edc1ff30","Type":"ContainerStarted","Data":"6fcb41c2d8eb900160c97c34c108a52e68ade356f55a22065d2062bf9a60915e"} Jan 28 16:53:09 crc kubenswrapper[4877]: I0128 16:53:09.627122 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" Jan 28 16:53:09 crc kubenswrapper[4877]: I0128 16:53:09.629846 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv" event={"ID":"7fa1ce1f-9222-4147-a89f-9d621b1afc80","Type":"ContainerStarted","Data":"0f313c000c0162bb16360be4ec245c54a49a99a50ad59c7f41ef8cfbbabe70bc"} Jan 28 16:53:09 crc kubenswrapper[4877]: I0128 16:53:09.654799 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" podStartSLOduration=2.498473032 podStartE2EDuration="7.654773022s" podCreationTimestamp="2026-01-28 16:53:02 +0000 UTC" firstStartedPulling="2026-01-28 16:53:03.139990913 +0000 UTC m=+1086.698317801" lastFinishedPulling="2026-01-28 16:53:08.296290903 +0000 UTC m=+1091.854617791" observedRunningTime="2026-01-28 16:53:09.652062649 +0000 UTC m=+1093.210389567" watchObservedRunningTime="2026-01-28 16:53:09.654773022 +0000 UTC m=+1093.213099910" Jan 28 16:53:09 crc kubenswrapper[4877]: I0128 16:53:09.679556 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv" podStartSLOduration=3.187625039 podStartE2EDuration="8.679525922s" podCreationTimestamp="2026-01-28 16:53:01 +0000 UTC" firstStartedPulling="2026-01-28 16:53:02.786762694 +0000 UTC m=+1086.345089582" lastFinishedPulling="2026-01-28 16:53:08.278663577 +0000 UTC m=+1091.836990465" observedRunningTime="2026-01-28 16:53:09.673208741 +0000 UTC m=+1093.231535629" watchObservedRunningTime="2026-01-28 16:53:09.679525922 +0000 UTC m=+1093.237852820" Jan 28 16:53:10 crc kubenswrapper[4877]: I0128 16:53:10.638095 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv" Jan 28 16:53:22 crc kubenswrapper[4877]: I0128 16:53:22.596511 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" Jan 28 16:53:37 crc kubenswrapper[4877]: I0128 16:53:37.076750 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:53:37 crc kubenswrapper[4877]: I0128 16:53:37.077320 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:53:42 crc kubenswrapper[4877]: I0128 16:53:42.161419 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-7bd999d8bc-qpskv" Jan 28 16:53:42 crc kubenswrapper[4877]: I0128 16:53:42.906673 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-qn628"] Jan 28 16:53:42 crc 
kubenswrapper[4877]: I0128 16:53:42.912708 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:42 crc kubenswrapper[4877]: I0128 16:53:42.915359 4877 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 28 16:53:42 crc kubenswrapper[4877]: I0128 16:53:42.915886 4877 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-qhlwc" Jan 28 16:53:42 crc kubenswrapper[4877]: I0128 16:53:42.916238 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 28 16:53:42 crc kubenswrapper[4877]: I0128 16:53:42.920782 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl"] Jan 28 16:53:42 crc kubenswrapper[4877]: I0128 16:53:42.922193 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl" Jan 28 16:53:42 crc kubenswrapper[4877]: I0128 16:53:42.928895 4877 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 28 16:53:42 crc kubenswrapper[4877]: I0128 16:53:42.941122 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl"] Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.001926 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btfq2\" (UniqueName: \"kubernetes.io/projected/e25ef10a-92ae-45b2-9467-7f15b523a8a1-kube-api-access-btfq2\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.002002 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e25ef10a-92ae-45b2-9467-7f15b523a8a1-frr-startup\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.002086 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e25ef10a-92ae-45b2-9467-7f15b523a8a1-metrics-certs\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.002126 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c59vq\" (UniqueName: \"kubernetes.io/projected/04c21939-4136-40d4-9569-ea4f0bc523c4-kube-api-access-c59vq\") pod \"frr-k8s-webhook-server-7df86c4f6c-8znpl\" (UID: \"04c21939-4136-40d4-9569-ea4f0bc523c4\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.002154 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e25ef10a-92ae-45b2-9467-7f15b523a8a1-metrics\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.002243 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" 
(UniqueName: \"kubernetes.io/empty-dir/e25ef10a-92ae-45b2-9467-7f15b523a8a1-frr-sockets\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.002301 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e25ef10a-92ae-45b2-9467-7f15b523a8a1-frr-conf\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.002338 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e25ef10a-92ae-45b2-9467-7f15b523a8a1-reloader\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.002370 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/04c21939-4136-40d4-9569-ea4f0bc523c4-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-8znpl\" (UID: \"04c21939-4136-40d4-9569-ea4f0bc523c4\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.042417 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-s7mdg"] Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.044274 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-s7mdg" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.049585 4877 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.051706 4877 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-d6kch" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.053656 4877 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.053876 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.073011 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-jjmp5"] Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.074605 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-jjmp5" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.081122 4877 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.096296 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-jjmp5"] Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.104277 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e25ef10a-92ae-45b2-9467-7f15b523a8a1-metrics-certs\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.104340 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c59vq\" (UniqueName: \"kubernetes.io/projected/04c21939-4136-40d4-9569-ea4f0bc523c4-kube-api-access-c59vq\") pod \"frr-k8s-webhook-server-7df86c4f6c-8znpl\" (UID: \"04c21939-4136-40d4-9569-ea4f0bc523c4\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.104370 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e25ef10a-92ae-45b2-9467-7f15b523a8a1-metrics\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.104550 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1f8e6a17-6325-49ef-88be-7be71a431bd9-memberlist\") pod \"speaker-s7mdg\" (UID: \"1f8e6a17-6325-49ef-88be-7be71a431bd9\") " pod="metallb-system/speaker-s7mdg" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.104585 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e25ef10a-92ae-45b2-9467-7f15b523a8a1-frr-sockets\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.104620 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1f8e6a17-6325-49ef-88be-7be71a431bd9-metallb-excludel2\") pod \"speaker-s7mdg\" (UID: \"1f8e6a17-6325-49ef-88be-7be71a431bd9\") " pod="metallb-system/speaker-s7mdg" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.104652 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e25ef10a-92ae-45b2-9467-7f15b523a8a1-frr-conf\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.104683 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e25ef10a-92ae-45b2-9467-7f15b523a8a1-reloader\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.104706 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/04c21939-4136-40d4-9569-ea4f0bc523c4-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-8znpl\" (UID: \"04c21939-4136-40d4-9569-ea4f0bc523c4\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.104733 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1f8e6a17-6325-49ef-88be-7be71a431bd9-metrics-certs\") pod \"speaker-s7mdg\" (UID: \"1f8e6a17-6325-49ef-88be-7be71a431bd9\") " pod="metallb-system/speaker-s7mdg" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.104772 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpxms\" (UniqueName: \"kubernetes.io/projected/1f8e6a17-6325-49ef-88be-7be71a431bd9-kube-api-access-lpxms\") pod \"speaker-s7mdg\" (UID: \"1f8e6a17-6325-49ef-88be-7be71a431bd9\") " pod="metallb-system/speaker-s7mdg" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.104810 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btfq2\" (UniqueName: \"kubernetes.io/projected/e25ef10a-92ae-45b2-9467-7f15b523a8a1-kube-api-access-btfq2\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.104836 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e25ef10a-92ae-45b2-9467-7f15b523a8a1-frr-startup\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: E0128 16:53:43.105779 4877 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Jan 28 16:53:43 crc kubenswrapper[4877]: E0128 16:53:43.105887 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/04c21939-4136-40d4-9569-ea4f0bc523c4-cert podName:04c21939-4136-40d4-9569-ea4f0bc523c4 nodeName:}" failed. No retries permitted until 2026-01-28 16:53:43.605857538 +0000 UTC m=+1127.164184426 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/04c21939-4136-40d4-9569-ea4f0bc523c4-cert") pod "frr-k8s-webhook-server-7df86c4f6c-8znpl" (UID: "04c21939-4136-40d4-9569-ea4f0bc523c4") : secret "frr-k8s-webhook-server-cert" not found Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.207170 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qb4q4\" (UniqueName: \"kubernetes.io/projected/49ccfeed-5c96-428f-8758-50d6e1967655-kube-api-access-qb4q4\") pod \"controller-6968d8fdc4-jjmp5\" (UID: \"49ccfeed-5c96-428f-8758-50d6e1967655\") " pod="metallb-system/controller-6968d8fdc4-jjmp5" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.207239 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1f8e6a17-6325-49ef-88be-7be71a431bd9-metallb-excludel2\") pod \"speaker-s7mdg\" (UID: \"1f8e6a17-6325-49ef-88be-7be71a431bd9\") " pod="metallb-system/speaker-s7mdg" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.207309 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1f8e6a17-6325-49ef-88be-7be71a431bd9-metrics-certs\") pod \"speaker-s7mdg\" (UID: \"1f8e6a17-6325-49ef-88be-7be71a431bd9\") " pod="metallb-system/speaker-s7mdg" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.207341 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpxms\" (UniqueName: \"kubernetes.io/projected/1f8e6a17-6325-49ef-88be-7be71a431bd9-kube-api-access-lpxms\") pod \"speaker-s7mdg\" (UID: \"1f8e6a17-6325-49ef-88be-7be71a431bd9\") " pod="metallb-system/speaker-s7mdg" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.207360 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/49ccfeed-5c96-428f-8758-50d6e1967655-metrics-certs\") pod \"controller-6968d8fdc4-jjmp5\" (UID: \"49ccfeed-5c96-428f-8758-50d6e1967655\") " pod="metallb-system/controller-6968d8fdc4-jjmp5" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.207447 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/49ccfeed-5c96-428f-8758-50d6e1967655-cert\") pod \"controller-6968d8fdc4-jjmp5\" (UID: \"49ccfeed-5c96-428f-8758-50d6e1967655\") " pod="metallb-system/controller-6968d8fdc4-jjmp5" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.207510 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1f8e6a17-6325-49ef-88be-7be71a431bd9-memberlist\") pod \"speaker-s7mdg\" (UID: \"1f8e6a17-6325-49ef-88be-7be71a431bd9\") " pod="metallb-system/speaker-s7mdg" Jan 28 16:53:43 crc kubenswrapper[4877]: E0128 16:53:43.207703 4877 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 28 16:53:43 crc kubenswrapper[4877]: E0128 16:53:43.207763 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1f8e6a17-6325-49ef-88be-7be71a431bd9-memberlist podName:1f8e6a17-6325-49ef-88be-7be71a431bd9 nodeName:}" failed. No retries permitted until 2026-01-28 16:53:43.707742677 +0000 UTC m=+1127.266069565 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/1f8e6a17-6325-49ef-88be-7be71a431bd9-memberlist") pod "speaker-s7mdg" (UID: "1f8e6a17-6325-49ef-88be-7be71a431bd9") : secret "metallb-memberlist" not found Jan 28 16:53:43 crc kubenswrapper[4877]: E0128 16:53:43.207840 4877 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Jan 28 16:53:43 crc kubenswrapper[4877]: E0128 16:53:43.207951 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1f8e6a17-6325-49ef-88be-7be71a431bd9-metrics-certs podName:1f8e6a17-6325-49ef-88be-7be71a431bd9 nodeName:}" failed. No retries permitted until 2026-01-28 16:53:43.707918372 +0000 UTC m=+1127.266245260 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1f8e6a17-6325-49ef-88be-7be71a431bd9-metrics-certs") pod "speaker-s7mdg" (UID: "1f8e6a17-6325-49ef-88be-7be71a431bd9") : secret "speaker-certs-secret" not found Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.309456 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/49ccfeed-5c96-428f-8758-50d6e1967655-metrics-certs\") pod \"controller-6968d8fdc4-jjmp5\" (UID: \"49ccfeed-5c96-428f-8758-50d6e1967655\") " pod="metallb-system/controller-6968d8fdc4-jjmp5" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.309668 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/49ccfeed-5c96-428f-8758-50d6e1967655-cert\") pod \"controller-6968d8fdc4-jjmp5\" (UID: \"49ccfeed-5c96-428f-8758-50d6e1967655\") " pod="metallb-system/controller-6968d8fdc4-jjmp5" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.309767 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qb4q4\" (UniqueName: \"kubernetes.io/projected/49ccfeed-5c96-428f-8758-50d6e1967655-kube-api-access-qb4q4\") pod \"controller-6968d8fdc4-jjmp5\" (UID: \"49ccfeed-5c96-428f-8758-50d6e1967655\") " pod="metallb-system/controller-6968d8fdc4-jjmp5" Jan 28 16:53:43 crc kubenswrapper[4877]: E0128 16:53:43.310339 4877 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Jan 28 16:53:43 crc kubenswrapper[4877]: E0128 16:53:43.310405 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/49ccfeed-5c96-428f-8758-50d6e1967655-metrics-certs podName:49ccfeed-5c96-428f-8758-50d6e1967655 nodeName:}" failed. No retries permitted until 2026-01-28 16:53:43.810384858 +0000 UTC m=+1127.368711746 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/49ccfeed-5c96-428f-8758-50d6e1967655-metrics-certs") pod "controller-6968d8fdc4-jjmp5" (UID: "49ccfeed-5c96-428f-8758-50d6e1967655") : secret "controller-certs-secret" not found Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.314885 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/49ccfeed-5c96-428f-8758-50d6e1967655-cert\") pod \"controller-6968d8fdc4-jjmp5\" (UID: \"49ccfeed-5c96-428f-8758-50d6e1967655\") " pod="metallb-system/controller-6968d8fdc4-jjmp5" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.315952 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1f8e6a17-6325-49ef-88be-7be71a431bd9-metallb-excludel2\") pod \"speaker-s7mdg\" (UID: \"1f8e6a17-6325-49ef-88be-7be71a431bd9\") " pod="metallb-system/speaker-s7mdg" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.315959 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e25ef10a-92ae-45b2-9467-7f15b523a8a1-frr-startup\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.316238 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpxms\" (UniqueName: \"kubernetes.io/projected/1f8e6a17-6325-49ef-88be-7be71a431bd9-kube-api-access-lpxms\") pod \"speaker-s7mdg\" (UID: \"1f8e6a17-6325-49ef-88be-7be71a431bd9\") " pod="metallb-system/speaker-s7mdg" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.321998 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e25ef10a-92ae-45b2-9467-7f15b523a8a1-metrics\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.324813 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c59vq\" (UniqueName: \"kubernetes.io/projected/04c21939-4136-40d4-9569-ea4f0bc523c4-kube-api-access-c59vq\") pod \"frr-k8s-webhook-server-7df86c4f6c-8znpl\" (UID: \"04c21939-4136-40d4-9569-ea4f0bc523c4\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.326972 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qb4q4\" (UniqueName: \"kubernetes.io/projected/49ccfeed-5c96-428f-8758-50d6e1967655-kube-api-access-qb4q4\") pod \"controller-6968d8fdc4-jjmp5\" (UID: \"49ccfeed-5c96-428f-8758-50d6e1967655\") " pod="metallb-system/controller-6968d8fdc4-jjmp5" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.359254 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e25ef10a-92ae-45b2-9467-7f15b523a8a1-reloader\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.359490 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e25ef10a-92ae-45b2-9467-7f15b523a8a1-frr-conf\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " 
pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.359568 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e25ef10a-92ae-45b2-9467-7f15b523a8a1-frr-sockets\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.359812 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e25ef10a-92ae-45b2-9467-7f15b523a8a1-metrics-certs\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.360088 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btfq2\" (UniqueName: \"kubernetes.io/projected/e25ef10a-92ae-45b2-9467-7f15b523a8a1-kube-api-access-btfq2\") pod \"frr-k8s-qn628\" (UID: \"e25ef10a-92ae-45b2-9467-7f15b523a8a1\") " pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.544388 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.617595 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/04c21939-4136-40d4-9569-ea4f0bc523c4-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-8znpl\" (UID: \"04c21939-4136-40d4-9569-ea4f0bc523c4\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.625223 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/04c21939-4136-40d4-9569-ea4f0bc523c4-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-8znpl\" (UID: \"04c21939-4136-40d4-9569-ea4f0bc523c4\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.719984 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1f8e6a17-6325-49ef-88be-7be71a431bd9-metrics-certs\") pod \"speaker-s7mdg\" (UID: \"1f8e6a17-6325-49ef-88be-7be71a431bd9\") " pod="metallb-system/speaker-s7mdg" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.720538 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1f8e6a17-6325-49ef-88be-7be71a431bd9-memberlist\") pod \"speaker-s7mdg\" (UID: \"1f8e6a17-6325-49ef-88be-7be71a431bd9\") " pod="metallb-system/speaker-s7mdg" Jan 28 16:53:43 crc kubenswrapper[4877]: E0128 16:53:43.720696 4877 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 28 16:53:43 crc kubenswrapper[4877]: E0128 16:53:43.720749 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1f8e6a17-6325-49ef-88be-7be71a431bd9-memberlist podName:1f8e6a17-6325-49ef-88be-7be71a431bd9 nodeName:}" failed. No retries permitted until 2026-01-28 16:53:44.720734663 +0000 UTC m=+1128.279061551 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/1f8e6a17-6325-49ef-88be-7be71a431bd9-memberlist") pod "speaker-s7mdg" (UID: "1f8e6a17-6325-49ef-88be-7be71a431bd9") : secret "metallb-memberlist" not found Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.724348 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1f8e6a17-6325-49ef-88be-7be71a431bd9-metrics-certs\") pod \"speaker-s7mdg\" (UID: \"1f8e6a17-6325-49ef-88be-7be71a431bd9\") " pod="metallb-system/speaker-s7mdg" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.822710 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/49ccfeed-5c96-428f-8758-50d6e1967655-metrics-certs\") pod \"controller-6968d8fdc4-jjmp5\" (UID: \"49ccfeed-5c96-428f-8758-50d6e1967655\") " pod="metallb-system/controller-6968d8fdc4-jjmp5" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.826980 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/49ccfeed-5c96-428f-8758-50d6e1967655-metrics-certs\") pod \"controller-6968d8fdc4-jjmp5\" (UID: \"49ccfeed-5c96-428f-8758-50d6e1967655\") " pod="metallb-system/controller-6968d8fdc4-jjmp5" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.862946 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl" Jan 28 16:53:43 crc kubenswrapper[4877]: I0128 16:53:43.939829 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qn628" event={"ID":"e25ef10a-92ae-45b2-9467-7f15b523a8a1","Type":"ContainerStarted","Data":"98d6ab4bcde342809d2c65dc355ce965edcc4868d2030148625e419604f68e3c"} Jan 28 16:53:44 crc kubenswrapper[4877]: I0128 16:53:44.004142 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-jjmp5" Jan 28 16:53:44 crc kubenswrapper[4877]: I0128 16:53:44.262864 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-jjmp5"] Jan 28 16:53:44 crc kubenswrapper[4877]: I0128 16:53:44.312679 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl"] Jan 28 16:53:44 crc kubenswrapper[4877]: W0128 16:53:44.321968 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod04c21939_4136_40d4_9569_ea4f0bc523c4.slice/crio-741fd6c5a575fdb31a4309940a65c3972ea231ba7c877865310f8bdd8e9fe23c WatchSource:0}: Error finding container 741fd6c5a575fdb31a4309940a65c3972ea231ba7c877865310f8bdd8e9fe23c: Status 404 returned error can't find the container with id 741fd6c5a575fdb31a4309940a65c3972ea231ba7c877865310f8bdd8e9fe23c Jan 28 16:53:44 crc kubenswrapper[4877]: I0128 16:53:44.741504 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1f8e6a17-6325-49ef-88be-7be71a431bd9-memberlist\") pod \"speaker-s7mdg\" (UID: \"1f8e6a17-6325-49ef-88be-7be71a431bd9\") " pod="metallb-system/speaker-s7mdg" Jan 28 16:53:44 crc kubenswrapper[4877]: I0128 16:53:44.748705 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1f8e6a17-6325-49ef-88be-7be71a431bd9-memberlist\") pod \"speaker-s7mdg\" (UID: \"1f8e6a17-6325-49ef-88be-7be71a431bd9\") " pod="metallb-system/speaker-s7mdg" Jan 28 16:53:44 crc kubenswrapper[4877]: I0128 16:53:44.816076 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-s7mdg" Jan 28 16:53:44 crc kubenswrapper[4877]: W0128 16:53:44.846913 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1f8e6a17_6325_49ef_88be_7be71a431bd9.slice/crio-12def4c4b3e8b9aaa54e9a2a7ffb04343b3dec9fd573e46a6761bfe3da012223 WatchSource:0}: Error finding container 12def4c4b3e8b9aaa54e9a2a7ffb04343b3dec9fd573e46a6761bfe3da012223: Status 404 returned error can't find the container with id 12def4c4b3e8b9aaa54e9a2a7ffb04343b3dec9fd573e46a6761bfe3da012223 Jan 28 16:53:44 crc kubenswrapper[4877]: I0128 16:53:44.952183 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl" event={"ID":"04c21939-4136-40d4-9569-ea4f0bc523c4","Type":"ContainerStarted","Data":"741fd6c5a575fdb31a4309940a65c3972ea231ba7c877865310f8bdd8e9fe23c"} Jan 28 16:53:44 crc kubenswrapper[4877]: I0128 16:53:44.954716 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-s7mdg" event={"ID":"1f8e6a17-6325-49ef-88be-7be71a431bd9","Type":"ContainerStarted","Data":"12def4c4b3e8b9aaa54e9a2a7ffb04343b3dec9fd573e46a6761bfe3da012223"} Jan 28 16:53:44 crc kubenswrapper[4877]: I0128 16:53:44.963023 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-jjmp5" event={"ID":"49ccfeed-5c96-428f-8758-50d6e1967655","Type":"ContainerStarted","Data":"c322499d53fa740c1dc7f9dd0696d9e488bcefd84c7daba296abb0001c21b42e"} Jan 28 16:53:44 crc kubenswrapper[4877]: I0128 16:53:44.963054 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-jjmp5" 
event={"ID":"49ccfeed-5c96-428f-8758-50d6e1967655","Type":"ContainerStarted","Data":"7b71df1d665551d8324c4bc5d324906db5f3768ac4c8f0b77b4226201786d551"} Jan 28 16:53:44 crc kubenswrapper[4877]: I0128 16:53:44.963066 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-jjmp5" event={"ID":"49ccfeed-5c96-428f-8758-50d6e1967655","Type":"ContainerStarted","Data":"07a2cb5f1f456998368e648423380120a998539f9b6b9ea3dbf84dc9f2373c54"} Jan 28 16:53:44 crc kubenswrapper[4877]: I0128 16:53:44.963365 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-jjmp5" Jan 28 16:53:44 crc kubenswrapper[4877]: I0128 16:53:44.988442 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-jjmp5" podStartSLOduration=1.988411101 podStartE2EDuration="1.988411101s" podCreationTimestamp="2026-01-28 16:53:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:53:44.977652 +0000 UTC m=+1128.535978898" watchObservedRunningTime="2026-01-28 16:53:44.988411101 +0000 UTC m=+1128.546738009" Jan 28 16:53:45 crc kubenswrapper[4877]: I0128 16:53:45.976588 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-s7mdg" event={"ID":"1f8e6a17-6325-49ef-88be-7be71a431bd9","Type":"ContainerStarted","Data":"b3e87b2d3a89e6e9f875ca01dd0275c42915b379814ca9a13ea24c799c3f8004"} Jan 28 16:53:45 crc kubenswrapper[4877]: I0128 16:53:45.977106 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-s7mdg" event={"ID":"1f8e6a17-6325-49ef-88be-7be71a431bd9","Type":"ContainerStarted","Data":"5ab968bfce77a7d16794cca1c302e28c1018a354d2ec2a847c81498a61aae63f"} Jan 28 16:53:46 crc kubenswrapper[4877]: I0128 16:53:46.007156 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-s7mdg" podStartSLOduration=3.007127376 podStartE2EDuration="3.007127376s" podCreationTimestamp="2026-01-28 16:53:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:53:46.000928057 +0000 UTC m=+1129.559254945" watchObservedRunningTime="2026-01-28 16:53:46.007127376 +0000 UTC m=+1129.565454264" Jan 28 16:53:47 crc kubenswrapper[4877]: I0128 16:53:47.022676 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-s7mdg" Jan 28 16:53:53 crc kubenswrapper[4877]: I0128 16:53:53.084356 4877 generic.go:334] "Generic (PLEG): container finished" podID="e25ef10a-92ae-45b2-9467-7f15b523a8a1" containerID="c8e6661540e8dafffff7881a558c2abf16d3d18b50e31e64e903394b2d89f076" exitCode=0 Jan 28 16:53:53 crc kubenswrapper[4877]: I0128 16:53:53.084463 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qn628" event={"ID":"e25ef10a-92ae-45b2-9467-7f15b523a8a1","Type":"ContainerDied","Data":"c8e6661540e8dafffff7881a558c2abf16d3d18b50e31e64e903394b2d89f076"} Jan 28 16:53:53 crc kubenswrapper[4877]: I0128 16:53:53.091318 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl" event={"ID":"04c21939-4136-40d4-9569-ea4f0bc523c4","Type":"ContainerStarted","Data":"fcd2761a006aed450bdb8698873c6fe36fe4ec271bd6594c4cb4c7ab290c7bc9"} Jan 28 16:53:53 crc kubenswrapper[4877]: I0128 16:53:53.091670 4877 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl" Jan 28 16:53:53 crc kubenswrapper[4877]: I0128 16:53:53.167370 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl" podStartSLOduration=3.254882499 podStartE2EDuration="11.167339788s" podCreationTimestamp="2026-01-28 16:53:42 +0000 UTC" firstStartedPulling="2026-01-28 16:53:44.325370941 +0000 UTC m=+1127.883697849" lastFinishedPulling="2026-01-28 16:53:52.23782821 +0000 UTC m=+1135.796155138" observedRunningTime="2026-01-28 16:53:53.159177757 +0000 UTC m=+1136.717504665" watchObservedRunningTime="2026-01-28 16:53:53.167339788 +0000 UTC m=+1136.725666676" Jan 28 16:53:54 crc kubenswrapper[4877]: I0128 16:53:54.009646 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-jjmp5" Jan 28 16:53:54 crc kubenswrapper[4877]: I0128 16:53:54.104419 4877 generic.go:334] "Generic (PLEG): container finished" podID="e25ef10a-92ae-45b2-9467-7f15b523a8a1" containerID="ef1f9eb4546f6ae912741a7145568d7f553f75e7048d5696557eb45aa5819965" exitCode=0 Jan 28 16:53:54 crc kubenswrapper[4877]: I0128 16:53:54.104558 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qn628" event={"ID":"e25ef10a-92ae-45b2-9467-7f15b523a8a1","Type":"ContainerDied","Data":"ef1f9eb4546f6ae912741a7145568d7f553f75e7048d5696557eb45aa5819965"} Jan 28 16:53:55 crc kubenswrapper[4877]: I0128 16:53:55.124468 4877 generic.go:334] "Generic (PLEG): container finished" podID="e25ef10a-92ae-45b2-9467-7f15b523a8a1" containerID="333f8d7f9ec7216accb2f01c67f52dc39639f326692a6eaa8714f6fda61315b6" exitCode=0 Jan 28 16:53:55 crc kubenswrapper[4877]: I0128 16:53:55.124511 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qn628" event={"ID":"e25ef10a-92ae-45b2-9467-7f15b523a8a1","Type":"ContainerDied","Data":"333f8d7f9ec7216accb2f01c67f52dc39639f326692a6eaa8714f6fda61315b6"} Jan 28 16:53:56 crc kubenswrapper[4877]: I0128 16:53:56.136920 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qn628" event={"ID":"e25ef10a-92ae-45b2-9467-7f15b523a8a1","Type":"ContainerStarted","Data":"6d5d00a6ba04e418eb5cfaf2cdd7a85ad8498b3449ac73745e2e11ecb7b34ef8"} Jan 28 16:53:56 crc kubenswrapper[4877]: I0128 16:53:56.137310 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qn628" event={"ID":"e25ef10a-92ae-45b2-9467-7f15b523a8a1","Type":"ContainerStarted","Data":"b81b168607a243305e3217050f4ec7145490811c62e701d012542c27f2969b41"} Jan 28 16:53:56 crc kubenswrapper[4877]: I0128 16:53:56.137325 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qn628" event={"ID":"e25ef10a-92ae-45b2-9467-7f15b523a8a1","Type":"ContainerStarted","Data":"561bde7bc7ae894d313633b07dccec72c699062ee8e24689cd0512da349b1a45"} Jan 28 16:53:56 crc kubenswrapper[4877]: I0128 16:53:56.137346 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qn628" event={"ID":"e25ef10a-92ae-45b2-9467-7f15b523a8a1","Type":"ContainerStarted","Data":"615e8f68029597b333017bdeec7de9fac311eb0fe14559de00603402f219f7e3"} Jan 28 16:53:56 crc kubenswrapper[4877]: I0128 16:53:56.137361 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qn628" 
event={"ID":"e25ef10a-92ae-45b2-9467-7f15b523a8a1","Type":"ContainerStarted","Data":"43b4b3abf674d402cbd88f2c8d8a7b7e8bf298843abe783f00c1b744e21a2e17"} Jan 28 16:53:57 crc kubenswrapper[4877]: I0128 16:53:57.152898 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qn628" event={"ID":"e25ef10a-92ae-45b2-9467-7f15b523a8a1","Type":"ContainerStarted","Data":"c7d61f97c639d4eaba69f2e9050c8d11637f4ae6ab47bedc15735fc055023ebc"} Jan 28 16:53:57 crc kubenswrapper[4877]: I0128 16:53:57.153383 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:57 crc kubenswrapper[4877]: I0128 16:53:57.188266 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-qn628" podStartSLOduration=6.756324133 podStartE2EDuration="15.188235153s" podCreationTimestamp="2026-01-28 16:53:42 +0000 UTC" firstStartedPulling="2026-01-28 16:53:43.781707014 +0000 UTC m=+1127.340033902" lastFinishedPulling="2026-01-28 16:53:52.213618034 +0000 UTC m=+1135.771944922" observedRunningTime="2026-01-28 16:53:57.179672111 +0000 UTC m=+1140.737999019" watchObservedRunningTime="2026-01-28 16:53:57.188235153 +0000 UTC m=+1140.746562041" Jan 28 16:53:58 crc kubenswrapper[4877]: I0128 16:53:58.544934 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-qn628" Jan 28 16:53:58 crc kubenswrapper[4877]: I0128 16:53:58.597149 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-qn628" Jan 28 16:54:03 crc kubenswrapper[4877]: I0128 16:54:03.867247 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl" Jan 28 16:54:04 crc kubenswrapper[4877]: I0128 16:54:04.824689 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-s7mdg" Jan 28 16:54:07 crc kubenswrapper[4877]: I0128 16:54:07.076037 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:54:07 crc kubenswrapper[4877]: I0128 16:54:07.076495 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:54:07 crc kubenswrapper[4877]: I0128 16:54:07.076636 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:54:07 crc kubenswrapper[4877]: I0128 16:54:07.077880 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7c4a3e8fea9bc1e25b572220a93b200bdf216a51c64b746a15d1ef6b91b206c8"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 16:54:07 crc kubenswrapper[4877]: I0128 16:54:07.077986 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" 
podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" containerID="cri-o://7c4a3e8fea9bc1e25b572220a93b200bdf216a51c64b746a15d1ef6b91b206c8" gracePeriod=600 Jan 28 16:54:07 crc kubenswrapper[4877]: I0128 16:54:07.266297 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="7c4a3e8fea9bc1e25b572220a93b200bdf216a51c64b746a15d1ef6b91b206c8" exitCode=0 Jan 28 16:54:07 crc kubenswrapper[4877]: I0128 16:54:07.266416 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"7c4a3e8fea9bc1e25b572220a93b200bdf216a51c64b746a15d1ef6b91b206c8"} Jan 28 16:54:07 crc kubenswrapper[4877]: I0128 16:54:07.267702 4877 scope.go:117] "RemoveContainer" containerID="50d22942424d37ecd19189f5b9ed73adaeed0500bb228f84257ef8a11bc4937c" Jan 28 16:54:07 crc kubenswrapper[4877]: I0128 16:54:07.769409 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-855m5"] Jan 28 16:54:07 crc kubenswrapper[4877]: I0128 16:54:07.771533 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-855m5" Jan 28 16:54:07 crc kubenswrapper[4877]: I0128 16:54:07.783112 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 28 16:54:07 crc kubenswrapper[4877]: I0128 16:54:07.783611 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-sdtfm" Jan 28 16:54:07 crc kubenswrapper[4877]: I0128 16:54:07.783882 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 28 16:54:07 crc kubenswrapper[4877]: I0128 16:54:07.790772 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-855m5"] Jan 28 16:54:07 crc kubenswrapper[4877]: I0128 16:54:07.884716 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vc6w7\" (UniqueName: \"kubernetes.io/projected/e6e00674-4ff9-41a5-a688-25afc59fce7b-kube-api-access-vc6w7\") pod \"openstack-operator-index-855m5\" (UID: \"e6e00674-4ff9-41a5-a688-25afc59fce7b\") " pod="openstack-operators/openstack-operator-index-855m5" Jan 28 16:54:07 crc kubenswrapper[4877]: I0128 16:54:07.986530 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vc6w7\" (UniqueName: \"kubernetes.io/projected/e6e00674-4ff9-41a5-a688-25afc59fce7b-kube-api-access-vc6w7\") pod \"openstack-operator-index-855m5\" (UID: \"e6e00674-4ff9-41a5-a688-25afc59fce7b\") " pod="openstack-operators/openstack-operator-index-855m5" Jan 28 16:54:08 crc kubenswrapper[4877]: I0128 16:54:08.014639 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vc6w7\" (UniqueName: \"kubernetes.io/projected/e6e00674-4ff9-41a5-a688-25afc59fce7b-kube-api-access-vc6w7\") pod \"openstack-operator-index-855m5\" (UID: \"e6e00674-4ff9-41a5-a688-25afc59fce7b\") " pod="openstack-operators/openstack-operator-index-855m5" Jan 28 16:54:08 crc kubenswrapper[4877]: I0128 16:54:08.092720 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-855m5" Jan 28 16:54:08 crc kubenswrapper[4877]: I0128 16:54:08.298777 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"f756cfb15c7c947e0f669ee8051d31638e5edc388c7a044a2e8411c49dfcce24"} Jan 28 16:54:08 crc kubenswrapper[4877]: I0128 16:54:08.583075 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-855m5"] Jan 28 16:54:08 crc kubenswrapper[4877]: W0128 16:54:08.589384 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode6e00674_4ff9_41a5_a688_25afc59fce7b.slice/crio-7391a069524d19fcdcd407fe4b16d63b534fa6462657eddd56768447825516ef WatchSource:0}: Error finding container 7391a069524d19fcdcd407fe4b16d63b534fa6462657eddd56768447825516ef: Status 404 returned error can't find the container with id 7391a069524d19fcdcd407fe4b16d63b534fa6462657eddd56768447825516ef Jan 28 16:54:09 crc kubenswrapper[4877]: I0128 16:54:09.308580 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-855m5" event={"ID":"e6e00674-4ff9-41a5-a688-25afc59fce7b","Type":"ContainerStarted","Data":"7391a069524d19fcdcd407fe4b16d63b534fa6462657eddd56768447825516ef"} Jan 28 16:54:10 crc kubenswrapper[4877]: I0128 16:54:10.949022 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-855m5"] Jan 28 16:54:11 crc kubenswrapper[4877]: I0128 16:54:11.553559 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-sgvqs"] Jan 28 16:54:11 crc kubenswrapper[4877]: I0128 16:54:11.555034 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-sgvqs" Jan 28 16:54:11 crc kubenswrapper[4877]: I0128 16:54:11.576613 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-sgvqs"] Jan 28 16:54:11 crc kubenswrapper[4877]: I0128 16:54:11.668264 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mx6cq\" (UniqueName: \"kubernetes.io/projected/2921bb76-4308-4082-ab32-4dc817ccac74-kube-api-access-mx6cq\") pod \"openstack-operator-index-sgvqs\" (UID: \"2921bb76-4308-4082-ab32-4dc817ccac74\") " pod="openstack-operators/openstack-operator-index-sgvqs" Jan 28 16:54:11 crc kubenswrapper[4877]: I0128 16:54:11.770668 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mx6cq\" (UniqueName: \"kubernetes.io/projected/2921bb76-4308-4082-ab32-4dc817ccac74-kube-api-access-mx6cq\") pod \"openstack-operator-index-sgvqs\" (UID: \"2921bb76-4308-4082-ab32-4dc817ccac74\") " pod="openstack-operators/openstack-operator-index-sgvqs" Jan 28 16:54:11 crc kubenswrapper[4877]: I0128 16:54:11.798322 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mx6cq\" (UniqueName: \"kubernetes.io/projected/2921bb76-4308-4082-ab32-4dc817ccac74-kube-api-access-mx6cq\") pod \"openstack-operator-index-sgvqs\" (UID: \"2921bb76-4308-4082-ab32-4dc817ccac74\") " pod="openstack-operators/openstack-operator-index-sgvqs" Jan 28 16:54:11 crc kubenswrapper[4877]: I0128 16:54:11.886847 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-sgvqs" Jan 28 16:54:12 crc kubenswrapper[4877]: I0128 16:54:12.959446 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-sgvqs"] Jan 28 16:54:13 crc kubenswrapper[4877]: I0128 16:54:13.548553 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-qn628" Jan 28 16:54:14 crc kubenswrapper[4877]: I0128 16:54:14.358239 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-855m5" event={"ID":"e6e00674-4ff9-41a5-a688-25afc59fce7b","Type":"ContainerStarted","Data":"0a2742c7c93c563270baf02ef1fa03eb794bc6418c9d2c91d7b40f012798182c"} Jan 28 16:54:14 crc kubenswrapper[4877]: I0128 16:54:14.358458 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-855m5" podUID="e6e00674-4ff9-41a5-a688-25afc59fce7b" containerName="registry-server" containerID="cri-o://0a2742c7c93c563270baf02ef1fa03eb794bc6418c9d2c91d7b40f012798182c" gracePeriod=2 Jan 28 16:54:14 crc kubenswrapper[4877]: I0128 16:54:14.364080 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-sgvqs" event={"ID":"2921bb76-4308-4082-ab32-4dc817ccac74","Type":"ContainerStarted","Data":"a6df0d223a06a0f899582cecbcbd0f364e720e9e67c561a2920512512fe20af6"} Jan 28 16:54:14 crc kubenswrapper[4877]: I0128 16:54:14.364153 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-sgvqs" event={"ID":"2921bb76-4308-4082-ab32-4dc817ccac74","Type":"ContainerStarted","Data":"77b4651fe0544e01b0ed5ab4df45e85ef4f6df3407e91730d284e8b1fdafc105"} Jan 28 16:54:14 crc kubenswrapper[4877]: I0128 16:54:14.390979 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/openstack-operator-index-855m5" podStartSLOduration=2.282901968 podStartE2EDuration="7.390946505s" podCreationTimestamp="2026-01-28 16:54:07 +0000 UTC" firstStartedPulling="2026-01-28 16:54:08.592572539 +0000 UTC m=+1152.150899427" lastFinishedPulling="2026-01-28 16:54:13.700617076 +0000 UTC m=+1157.258943964" observedRunningTime="2026-01-28 16:54:14.382751333 +0000 UTC m=+1157.941078221" watchObservedRunningTime="2026-01-28 16:54:14.390946505 +0000 UTC m=+1157.949273393" Jan 28 16:54:14 crc kubenswrapper[4877]: I0128 16:54:14.407171 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-sgvqs" podStartSLOduration=3.354750325 podStartE2EDuration="3.407146144s" podCreationTimestamp="2026-01-28 16:54:11 +0000 UTC" firstStartedPulling="2026-01-28 16:54:13.648911085 +0000 UTC m=+1157.207237973" lastFinishedPulling="2026-01-28 16:54:13.701306904 +0000 UTC m=+1157.259633792" observedRunningTime="2026-01-28 16:54:14.39997368 +0000 UTC m=+1157.958300568" watchObservedRunningTime="2026-01-28 16:54:14.407146144 +0000 UTC m=+1157.965473032" Jan 28 16:54:14 crc kubenswrapper[4877]: I0128 16:54:14.995822 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-855m5" Jan 28 16:54:15 crc kubenswrapper[4877]: I0128 16:54:15.139327 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vc6w7\" (UniqueName: \"kubernetes.io/projected/e6e00674-4ff9-41a5-a688-25afc59fce7b-kube-api-access-vc6w7\") pod \"e6e00674-4ff9-41a5-a688-25afc59fce7b\" (UID: \"e6e00674-4ff9-41a5-a688-25afc59fce7b\") " Jan 28 16:54:15 crc kubenswrapper[4877]: I0128 16:54:15.147205 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6e00674-4ff9-41a5-a688-25afc59fce7b-kube-api-access-vc6w7" (OuterVolumeSpecName: "kube-api-access-vc6w7") pod "e6e00674-4ff9-41a5-a688-25afc59fce7b" (UID: "e6e00674-4ff9-41a5-a688-25afc59fce7b"). InnerVolumeSpecName "kube-api-access-vc6w7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:54:15 crc kubenswrapper[4877]: I0128 16:54:15.242634 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vc6w7\" (UniqueName: \"kubernetes.io/projected/e6e00674-4ff9-41a5-a688-25afc59fce7b-kube-api-access-vc6w7\") on node \"crc\" DevicePath \"\"" Jan 28 16:54:15 crc kubenswrapper[4877]: I0128 16:54:15.378206 4877 generic.go:334] "Generic (PLEG): container finished" podID="e6e00674-4ff9-41a5-a688-25afc59fce7b" containerID="0a2742c7c93c563270baf02ef1fa03eb794bc6418c9d2c91d7b40f012798182c" exitCode=0 Jan 28 16:54:15 crc kubenswrapper[4877]: I0128 16:54:15.378348 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-855m5" Jan 28 16:54:15 crc kubenswrapper[4877]: I0128 16:54:15.379116 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-855m5" event={"ID":"e6e00674-4ff9-41a5-a688-25afc59fce7b","Type":"ContainerDied","Data":"0a2742c7c93c563270baf02ef1fa03eb794bc6418c9d2c91d7b40f012798182c"} Jan 28 16:54:15 crc kubenswrapper[4877]: I0128 16:54:15.379151 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-855m5" event={"ID":"e6e00674-4ff9-41a5-a688-25afc59fce7b","Type":"ContainerDied","Data":"7391a069524d19fcdcd407fe4b16d63b534fa6462657eddd56768447825516ef"} Jan 28 16:54:15 crc kubenswrapper[4877]: I0128 16:54:15.379170 4877 scope.go:117] "RemoveContainer" containerID="0a2742c7c93c563270baf02ef1fa03eb794bc6418c9d2c91d7b40f012798182c" Jan 28 16:54:15 crc kubenswrapper[4877]: I0128 16:54:15.407972 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-855m5"] Jan 28 16:54:15 crc kubenswrapper[4877]: I0128 16:54:15.415033 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-855m5"] Jan 28 16:54:15 crc kubenswrapper[4877]: I0128 16:54:15.415600 4877 scope.go:117] "RemoveContainer" containerID="0a2742c7c93c563270baf02ef1fa03eb794bc6418c9d2c91d7b40f012798182c" Jan 28 16:54:15 crc kubenswrapper[4877]: E0128 16:54:15.416205 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a2742c7c93c563270baf02ef1fa03eb794bc6418c9d2c91d7b40f012798182c\": container with ID starting with 0a2742c7c93c563270baf02ef1fa03eb794bc6418c9d2c91d7b40f012798182c not found: ID does not exist" containerID="0a2742c7c93c563270baf02ef1fa03eb794bc6418c9d2c91d7b40f012798182c" Jan 28 16:54:15 crc kubenswrapper[4877]: I0128 16:54:15.416262 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a2742c7c93c563270baf02ef1fa03eb794bc6418c9d2c91d7b40f012798182c"} err="failed to get container status \"0a2742c7c93c563270baf02ef1fa03eb794bc6418c9d2c91d7b40f012798182c\": rpc error: code = NotFound desc = could not find container \"0a2742c7c93c563270baf02ef1fa03eb794bc6418c9d2c91d7b40f012798182c\": container with ID starting with 0a2742c7c93c563270baf02ef1fa03eb794bc6418c9d2c91d7b40f012798182c not found: ID does not exist" Jan 28 16:54:17 crc kubenswrapper[4877]: I0128 16:54:17.343806 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6e00674-4ff9-41a5-a688-25afc59fce7b" path="/var/lib/kubelet/pods/e6e00674-4ff9-41a5-a688-25afc59fce7b/volumes" Jan 28 16:54:21 crc kubenswrapper[4877]: I0128 16:54:21.887596 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-sgvqs" Jan 28 16:54:21 crc kubenswrapper[4877]: I0128 16:54:21.888688 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-sgvqs" Jan 28 16:54:21 crc kubenswrapper[4877]: I0128 16:54:21.914689 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-sgvqs" Jan 28 16:54:22 crc kubenswrapper[4877]: I0128 16:54:22.481205 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-sgvqs" Jan 28 16:54:23 crc 
kubenswrapper[4877]: I0128 16:54:23.396520 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb"] Jan 28 16:54:23 crc kubenswrapper[4877]: E0128 16:54:23.397073 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6e00674-4ff9-41a5-a688-25afc59fce7b" containerName="registry-server" Jan 28 16:54:23 crc kubenswrapper[4877]: I0128 16:54:23.397094 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6e00674-4ff9-41a5-a688-25afc59fce7b" containerName="registry-server" Jan 28 16:54:23 crc kubenswrapper[4877]: I0128 16:54:23.397312 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6e00674-4ff9-41a5-a688-25afc59fce7b" containerName="registry-server" Jan 28 16:54:23 crc kubenswrapper[4877]: I0128 16:54:23.398982 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" Jan 28 16:54:23 crc kubenswrapper[4877]: I0128 16:54:23.402905 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-25bzs" Jan 28 16:54:23 crc kubenswrapper[4877]: I0128 16:54:23.410738 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb"] Jan 28 16:54:23 crc kubenswrapper[4877]: I0128 16:54:23.522884 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-bundle\") pod \"95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb\" (UID: \"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6\") " pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" Jan 28 16:54:23 crc kubenswrapper[4877]: I0128 16:54:23.523074 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-util\") pod \"95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb\" (UID: \"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6\") " pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" Jan 28 16:54:23 crc kubenswrapper[4877]: I0128 16:54:23.523178 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54kbm\" (UniqueName: \"kubernetes.io/projected/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-kube-api-access-54kbm\") pod \"95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb\" (UID: \"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6\") " pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" Jan 28 16:54:23 crc kubenswrapper[4877]: I0128 16:54:23.624625 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-util\") pod \"95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb\" (UID: \"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6\") " pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" Jan 28 16:54:23 crc kubenswrapper[4877]: I0128 16:54:23.624739 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54kbm\" (UniqueName: 
\"kubernetes.io/projected/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-kube-api-access-54kbm\") pod \"95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb\" (UID: \"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6\") " pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" Jan 28 16:54:23 crc kubenswrapper[4877]: I0128 16:54:23.624819 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-bundle\") pod \"95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb\" (UID: \"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6\") " pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" Jan 28 16:54:23 crc kubenswrapper[4877]: I0128 16:54:23.625495 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-util\") pod \"95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb\" (UID: \"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6\") " pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" Jan 28 16:54:23 crc kubenswrapper[4877]: I0128 16:54:23.625582 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-bundle\") pod \"95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb\" (UID: \"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6\") " pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" Jan 28 16:54:23 crc kubenswrapper[4877]: I0128 16:54:23.647472 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54kbm\" (UniqueName: \"kubernetes.io/projected/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-kube-api-access-54kbm\") pod \"95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb\" (UID: \"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6\") " pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" Jan 28 16:54:23 crc kubenswrapper[4877]: I0128 16:54:23.734015 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" Jan 28 16:54:24 crc kubenswrapper[4877]: I0128 16:54:24.186836 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb"] Jan 28 16:54:24 crc kubenswrapper[4877]: I0128 16:54:24.475162 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" event={"ID":"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6","Type":"ContainerStarted","Data":"b3602ac8192842cfe7f973e7d00f131b892addecffad7b68e2203833073646b3"} Jan 28 16:54:24 crc kubenswrapper[4877]: I0128 16:54:24.475237 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" event={"ID":"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6","Type":"ContainerStarted","Data":"8783c01ffa9bf7174b23f66301626245a68de09e4df9bd36f34bee1b08e4198f"} Jan 28 16:54:25 crc kubenswrapper[4877]: I0128 16:54:25.489351 4877 generic.go:334] "Generic (PLEG): container finished" podID="a28ee92c-3441-41e3-bc35-cb57dd7cf0d6" containerID="b3602ac8192842cfe7f973e7d00f131b892addecffad7b68e2203833073646b3" exitCode=0 Jan 28 16:54:25 crc kubenswrapper[4877]: I0128 16:54:25.492586 4877 generic.go:334] "Generic (PLEG): container finished" podID="a28ee92c-3441-41e3-bc35-cb57dd7cf0d6" containerID="18b792add60280248ce37812acbd11cd6ff44c8b9fc4f78b3bea78f0f3f1f0dc" exitCode=0 Jan 28 16:54:25 crc kubenswrapper[4877]: I0128 16:54:25.489456 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" event={"ID":"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6","Type":"ContainerDied","Data":"b3602ac8192842cfe7f973e7d00f131b892addecffad7b68e2203833073646b3"} Jan 28 16:54:25 crc kubenswrapper[4877]: I0128 16:54:25.492699 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" event={"ID":"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6","Type":"ContainerDied","Data":"18b792add60280248ce37812acbd11cd6ff44c8b9fc4f78b3bea78f0f3f1f0dc"} Jan 28 16:54:26 crc kubenswrapper[4877]: I0128 16:54:26.502617 4877 generic.go:334] "Generic (PLEG): container finished" podID="a28ee92c-3441-41e3-bc35-cb57dd7cf0d6" containerID="efa60d3426976c2a5ab3df99ad0d82d607a7ef6a8bcb3bb9508c81fbe61888fd" exitCode=0 Jan 28 16:54:26 crc kubenswrapper[4877]: I0128 16:54:26.502677 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" event={"ID":"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6","Type":"ContainerDied","Data":"efa60d3426976c2a5ab3df99ad0d82d607a7ef6a8bcb3bb9508c81fbe61888fd"} Jan 28 16:54:27 crc kubenswrapper[4877]: I0128 16:54:27.914026 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" Jan 28 16:54:28 crc kubenswrapper[4877]: I0128 16:54:28.020068 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-bundle\") pod \"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6\" (UID: \"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6\") " Jan 28 16:54:28 crc kubenswrapper[4877]: I0128 16:54:28.020127 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-util\") pod \"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6\" (UID: \"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6\") " Jan 28 16:54:28 crc kubenswrapper[4877]: I0128 16:54:28.020322 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54kbm\" (UniqueName: \"kubernetes.io/projected/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-kube-api-access-54kbm\") pod \"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6\" (UID: \"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6\") " Jan 28 16:54:28 crc kubenswrapper[4877]: I0128 16:54:28.020956 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-bundle" (OuterVolumeSpecName: "bundle") pod "a28ee92c-3441-41e3-bc35-cb57dd7cf0d6" (UID: "a28ee92c-3441-41e3-bc35-cb57dd7cf0d6"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:54:28 crc kubenswrapper[4877]: I0128 16:54:28.027171 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-kube-api-access-54kbm" (OuterVolumeSpecName: "kube-api-access-54kbm") pod "a28ee92c-3441-41e3-bc35-cb57dd7cf0d6" (UID: "a28ee92c-3441-41e3-bc35-cb57dd7cf0d6"). InnerVolumeSpecName "kube-api-access-54kbm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:54:28 crc kubenswrapper[4877]: I0128 16:54:28.037072 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-util" (OuterVolumeSpecName: "util") pod "a28ee92c-3441-41e3-bc35-cb57dd7cf0d6" (UID: "a28ee92c-3441-41e3-bc35-cb57dd7cf0d6"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:54:28 crc kubenswrapper[4877]: I0128 16:54:28.123883 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54kbm\" (UniqueName: \"kubernetes.io/projected/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-kube-api-access-54kbm\") on node \"crc\" DevicePath \"\"" Jan 28 16:54:28 crc kubenswrapper[4877]: I0128 16:54:28.123921 4877 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:54:28 crc kubenswrapper[4877]: I0128 16:54:28.123932 4877 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a28ee92c-3441-41e3-bc35-cb57dd7cf0d6-util\") on node \"crc\" DevicePath \"\"" Jan 28 16:54:28 crc kubenswrapper[4877]: I0128 16:54:28.525400 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" event={"ID":"a28ee92c-3441-41e3-bc35-cb57dd7cf0d6","Type":"ContainerDied","Data":"8783c01ffa9bf7174b23f66301626245a68de09e4df9bd36f34bee1b08e4198f"} Jan 28 16:54:28 crc kubenswrapper[4877]: I0128 16:54:28.525455 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8783c01ffa9bf7174b23f66301626245a68de09e4df9bd36f34bee1b08e4198f" Jan 28 16:54:28 crc kubenswrapper[4877]: I0128 16:54:28.525471 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/95ab3f23e1c848acc669ed644286a9cf27eb3ce75ec93e1d6fbbe9727ar4rhb" Jan 28 16:54:30 crc kubenswrapper[4877]: I0128 16:54:30.454128 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-7997469d6c-45lz7"] Jan 28 16:54:30 crc kubenswrapper[4877]: E0128 16:54:30.455188 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a28ee92c-3441-41e3-bc35-cb57dd7cf0d6" containerName="extract" Jan 28 16:54:30 crc kubenswrapper[4877]: I0128 16:54:30.455212 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="a28ee92c-3441-41e3-bc35-cb57dd7cf0d6" containerName="extract" Jan 28 16:54:30 crc kubenswrapper[4877]: E0128 16:54:30.455245 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a28ee92c-3441-41e3-bc35-cb57dd7cf0d6" containerName="util" Jan 28 16:54:30 crc kubenswrapper[4877]: I0128 16:54:30.455259 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="a28ee92c-3441-41e3-bc35-cb57dd7cf0d6" containerName="util" Jan 28 16:54:30 crc kubenswrapper[4877]: E0128 16:54:30.455313 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a28ee92c-3441-41e3-bc35-cb57dd7cf0d6" containerName="pull" Jan 28 16:54:30 crc kubenswrapper[4877]: I0128 16:54:30.455327 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="a28ee92c-3441-41e3-bc35-cb57dd7cf0d6" containerName="pull" Jan 28 16:54:30 crc kubenswrapper[4877]: I0128 16:54:30.455643 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="a28ee92c-3441-41e3-bc35-cb57dd7cf0d6" containerName="extract" Jan 28 16:54:30 crc kubenswrapper[4877]: I0128 16:54:30.456526 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-7997469d6c-45lz7" Jan 28 16:54:30 crc kubenswrapper[4877]: I0128 16:54:30.458566 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-lprg4" Jan 28 16:54:30 crc kubenswrapper[4877]: I0128 16:54:30.473941 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-7997469d6c-45lz7"] Jan 28 16:54:30 crc kubenswrapper[4877]: I0128 16:54:30.576426 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2t6g8\" (UniqueName: \"kubernetes.io/projected/c469a239-6ccb-4dd5-8778-3921ec52b6fb-kube-api-access-2t6g8\") pod \"openstack-operator-controller-init-7997469d6c-45lz7\" (UID: \"c469a239-6ccb-4dd5-8778-3921ec52b6fb\") " pod="openstack-operators/openstack-operator-controller-init-7997469d6c-45lz7" Jan 28 16:54:30 crc kubenswrapper[4877]: I0128 16:54:30.678831 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2t6g8\" (UniqueName: \"kubernetes.io/projected/c469a239-6ccb-4dd5-8778-3921ec52b6fb-kube-api-access-2t6g8\") pod \"openstack-operator-controller-init-7997469d6c-45lz7\" (UID: \"c469a239-6ccb-4dd5-8778-3921ec52b6fb\") " pod="openstack-operators/openstack-operator-controller-init-7997469d6c-45lz7" Jan 28 16:54:30 crc kubenswrapper[4877]: I0128 16:54:30.699013 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2t6g8\" (UniqueName: \"kubernetes.io/projected/c469a239-6ccb-4dd5-8778-3921ec52b6fb-kube-api-access-2t6g8\") pod \"openstack-operator-controller-init-7997469d6c-45lz7\" (UID: \"c469a239-6ccb-4dd5-8778-3921ec52b6fb\") " pod="openstack-operators/openstack-operator-controller-init-7997469d6c-45lz7" Jan 28 16:54:30 crc kubenswrapper[4877]: I0128 16:54:30.786178 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-7997469d6c-45lz7" Jan 28 16:54:31 crc kubenswrapper[4877]: I0128 16:54:31.441592 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-7997469d6c-45lz7"] Jan 28 16:54:31 crc kubenswrapper[4877]: I0128 16:54:31.558776 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-7997469d6c-45lz7" event={"ID":"c469a239-6ccb-4dd5-8778-3921ec52b6fb","Type":"ContainerStarted","Data":"93a7c589d48d7376117c8cb95c5282ffa3a142cba82187be42bc47a2d19f3d14"} Jan 28 16:54:38 crc kubenswrapper[4877]: I0128 16:54:38.667226 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-7997469d6c-45lz7" event={"ID":"c469a239-6ccb-4dd5-8778-3921ec52b6fb","Type":"ContainerStarted","Data":"f09c85f234328d0fb258f91a3ac1ec2d1eb6b3107909920ff0e14e35317f8205"} Jan 28 16:54:39 crc kubenswrapper[4877]: I0128 16:54:39.676383 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-7997469d6c-45lz7" Jan 28 16:54:39 crc kubenswrapper[4877]: I0128 16:54:39.741726 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-7997469d6c-45lz7" podStartSLOduration=2.802294702 podStartE2EDuration="9.741703488s" podCreationTimestamp="2026-01-28 16:54:30 +0000 UTC" firstStartedPulling="2026-01-28 16:54:31.464538805 +0000 UTC m=+1175.022865693" lastFinishedPulling="2026-01-28 16:54:38.403947591 +0000 UTC m=+1181.962274479" observedRunningTime="2026-01-28 16:54:39.732949326 +0000 UTC m=+1183.291276214" watchObservedRunningTime="2026-01-28 16:54:39.741703488 +0000 UTC m=+1183.300030376" Jan 28 16:54:50 crc kubenswrapper[4877]: I0128 16:54:50.792445 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-7997469d6c-45lz7" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.001208 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.003368 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.011905 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-cs9zn" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.013233 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7f86f8796f-4xxbg"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.015088 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-4xxbg" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.016885 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-gq255" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.030888 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7f86f8796f-4xxbg"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.045564 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.056172 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-vsnlm"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.057940 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-vsnlm" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.063952 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-6l9xp" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.087115 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-gxvhn"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.088329 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-gxvhn" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.094834 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-ltx6w" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.097629 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmhzt\" (UniqueName: \"kubernetes.io/projected/e068371c-e59c-4e57-8fd3-a55470f67063-kube-api-access-fmhzt\") pod \"cinder-operator-controller-manager-7478f7dbf9-qrkvb\" (UID: \"e068371c-e59c-4e57-8fd3-a55470f67063\") " pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.097667 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5lhh\" (UniqueName: \"kubernetes.io/projected/f24bb145-227c-43be-b63a-d606c168241b-kube-api-access-q5lhh\") pod \"barbican-operator-controller-manager-7f86f8796f-4xxbg\" (UID: \"f24bb145-227c-43be-b63a-d606c168241b\") " pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-4xxbg" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.097785 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5p78d\" (UniqueName: \"kubernetes.io/projected/970f0fe7-15e4-4fcf-bca0-eb07b26ba94a-kube-api-access-5p78d\") pod \"designate-operator-controller-manager-b45d7bf98-vsnlm\" (UID: \"970f0fe7-15e4-4fcf-bca0-eb07b26ba94a\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-vsnlm" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.109987 4877 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.111304 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.114651 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-kfm4d" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.123342 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-vsnlm"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.141788 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.153826 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-pvxjd"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.155141 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-pvxjd" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.182417 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-qjs4s" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.201618 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-gxvhn"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.202863 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5lhh\" (UniqueName: \"kubernetes.io/projected/f24bb145-227c-43be-b63a-d606c168241b-kube-api-access-q5lhh\") pod \"barbican-operator-controller-manager-7f86f8796f-4xxbg\" (UID: \"f24bb145-227c-43be-b63a-d606c168241b\") " pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-4xxbg" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.202929 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-988bx\" (UniqueName: \"kubernetes.io/projected/c2d80875-c32f-4596-a44d-6a4b9d524304-kube-api-access-988bx\") pod \"horizon-operator-controller-manager-77d5c5b54f-pvxjd\" (UID: \"c2d80875-c32f-4596-a44d-6a4b9d524304\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-pvxjd" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.202998 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgwrr\" (UniqueName: \"kubernetes.io/projected/0e3ad19a-2dfd-4238-9bc5-1f2741b3adc0-kube-api-access-jgwrr\") pod \"glance-operator-controller-manager-78fdd796fd-gxvhn\" (UID: \"0e3ad19a-2dfd-4238-9bc5-1f2741b3adc0\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-gxvhn" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.203038 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ld6np\" (UniqueName: \"kubernetes.io/projected/55abdd00-6b2b-44a2-ae22-bae3fbb12282-kube-api-access-ld6np\") pod \"heat-operator-controller-manager-594c8c9d5d-hsh6j\" (UID: \"55abdd00-6b2b-44a2-ae22-bae3fbb12282\") " 
pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.203066 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5p78d\" (UniqueName: \"kubernetes.io/projected/970f0fe7-15e4-4fcf-bca0-eb07b26ba94a-kube-api-access-5p78d\") pod \"designate-operator-controller-manager-b45d7bf98-vsnlm\" (UID: \"970f0fe7-15e4-4fcf-bca0-eb07b26ba94a\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-vsnlm" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.203109 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmhzt\" (UniqueName: \"kubernetes.io/projected/e068371c-e59c-4e57-8fd3-a55470f67063-kube-api-access-fmhzt\") pod \"cinder-operator-controller-manager-7478f7dbf9-qrkvb\" (UID: \"e068371c-e59c-4e57-8fd3-a55470f67063\") " pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.216211 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-pvxjd"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.233603 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5p78d\" (UniqueName: \"kubernetes.io/projected/970f0fe7-15e4-4fcf-bca0-eb07b26ba94a-kube-api-access-5p78d\") pod \"designate-operator-controller-manager-b45d7bf98-vsnlm\" (UID: \"970f0fe7-15e4-4fcf-bca0-eb07b26ba94a\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-vsnlm" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.233698 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmhzt\" (UniqueName: \"kubernetes.io/projected/e068371c-e59c-4e57-8fd3-a55470f67063-kube-api-access-fmhzt\") pod \"cinder-operator-controller-manager-7478f7dbf9-qrkvb\" (UID: \"e068371c-e59c-4e57-8fd3-a55470f67063\") " pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.245340 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5lhh\" (UniqueName: \"kubernetes.io/projected/f24bb145-227c-43be-b63a-d606c168241b-kube-api-access-q5lhh\") pod \"barbican-operator-controller-manager-7f86f8796f-4xxbg\" (UID: \"f24bb145-227c-43be-b63a-d606c168241b\") " pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-4xxbg" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.247240 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.248447 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.255119 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.255400 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-dm4jb" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.298129 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.299638 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.304550 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.306017 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6s6kh\" (UniqueName: \"kubernetes.io/projected/6fdf0399-314b-40df-96f2-c27008769f71-kube-api-access-6s6kh\") pod \"infra-operator-controller-manager-694cf4f878-zsqn6\" (UID: \"6fdf0399-314b-40df-96f2-c27008769f71\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.306077 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-988bx\" (UniqueName: \"kubernetes.io/projected/c2d80875-c32f-4596-a44d-6a4b9d524304-kube-api-access-988bx\") pod \"horizon-operator-controller-manager-77d5c5b54f-pvxjd\" (UID: \"c2d80875-c32f-4596-a44d-6a4b9d524304\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-pvxjd" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.306143 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgwrr\" (UniqueName: \"kubernetes.io/projected/0e3ad19a-2dfd-4238-9bc5-1f2741b3adc0-kube-api-access-jgwrr\") pod \"glance-operator-controller-manager-78fdd796fd-gxvhn\" (UID: \"0e3ad19a-2dfd-4238-9bc5-1f2741b3adc0\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-gxvhn" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.306172 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert\") pod \"infra-operator-controller-manager-694cf4f878-zsqn6\" (UID: \"6fdf0399-314b-40df-96f2-c27008769f71\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.306201 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ld6np\" (UniqueName: \"kubernetes.io/projected/55abdd00-6b2b-44a2-ae22-bae3fbb12282-kube-api-access-ld6np\") pod \"heat-operator-controller-manager-594c8c9d5d-hsh6j\" (UID: \"55abdd00-6b2b-44a2-ae22-bae3fbb12282\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.311385 4877 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-jj8qc" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.319596 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.327004 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.349387 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgwrr\" (UniqueName: \"kubernetes.io/projected/0e3ad19a-2dfd-4238-9bc5-1f2741b3adc0-kube-api-access-jgwrr\") pod \"glance-operator-controller-manager-78fdd796fd-gxvhn\" (UID: \"0e3ad19a-2dfd-4238-9bc5-1f2741b3adc0\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-gxvhn" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.350104 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-4xxbg" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.365697 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-988bx\" (UniqueName: \"kubernetes.io/projected/c2d80875-c32f-4596-a44d-6a4b9d524304-kube-api-access-988bx\") pod \"horizon-operator-controller-manager-77d5c5b54f-pvxjd\" (UID: \"c2d80875-c32f-4596-a44d-6a4b9d524304\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-pvxjd" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.373209 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ld6np\" (UniqueName: \"kubernetes.io/projected/55abdd00-6b2b-44a2-ae22-bae3fbb12282-kube-api-access-ld6np\") pod \"heat-operator-controller-manager-594c8c9d5d-hsh6j\" (UID: \"55abdd00-6b2b-44a2-ae22-bae3fbb12282\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.379722 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.382612 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.388908 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-vsnlm" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.402157 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-d9dm4" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.410905 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwrds\" (UniqueName: \"kubernetes.io/projected/434f69b5-0d70-418d-aa5e-04e307a5399c-kube-api-access-qwrds\") pod \"ironic-operator-controller-manager-598f7747c9-z46t2\" (UID: \"434f69b5-0d70-418d-aa5e-04e307a5399c\") " pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.410958 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert\") pod \"infra-operator-controller-manager-694cf4f878-zsqn6\" (UID: \"6fdf0399-314b-40df-96f2-c27008769f71\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.411025 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6s6kh\" (UniqueName: \"kubernetes.io/projected/6fdf0399-314b-40df-96f2-c27008769f71-kube-api-access-6s6kh\") pod \"infra-operator-controller-manager-694cf4f878-zsqn6\" (UID: \"6fdf0399-314b-40df-96f2-c27008769f71\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6" Jan 28 16:55:10 crc kubenswrapper[4877]: E0128 16:55:10.411967 4877 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 16:55:10 crc kubenswrapper[4877]: E0128 16:55:10.412082 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert podName:6fdf0399-314b-40df-96f2-c27008769f71 nodeName:}" failed. No retries permitted until 2026-01-28 16:55:10.912054045 +0000 UTC m=+1214.470380933 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert") pod "infra-operator-controller-manager-694cf4f878-zsqn6" (UID: "6fdf0399-314b-40df-96f2-c27008769f71") : secret "infra-operator-webhook-server-cert" not found Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.415400 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-gxvhn" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.422550 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.424043 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.429564 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-4zdcw" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.467005 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.469154 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6s6kh\" (UniqueName: \"kubernetes.io/projected/6fdf0399-314b-40df-96f2-c27008769f71-kube-api-access-6s6kh\") pod \"infra-operator-controller-manager-694cf4f878-zsqn6\" (UID: \"6fdf0399-314b-40df-96f2-c27008769f71\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.480651 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-pvxjd" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.512824 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.513158 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwrds\" (UniqueName: \"kubernetes.io/projected/434f69b5-0d70-418d-aa5e-04e307a5399c-kube-api-access-qwrds\") pod \"ironic-operator-controller-manager-598f7747c9-z46t2\" (UID: \"434f69b5-0d70-418d-aa5e-04e307a5399c\") " pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.513278 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkdqj\" (UniqueName: \"kubernetes.io/projected/aa23290f-1702-4c63-92c7-047d18922df9-kube-api-access-wkdqj\") pod \"mariadb-operator-controller-manager-6b9fb5fdcb-pm47m\" (UID: \"aa23290f-1702-4c63-92c7-047d18922df9\") " pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.513354 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zb2t\" (UniqueName: \"kubernetes.io/projected/2106e351-4841-4ab5-84eb-745af2cb3379-kube-api-access-6zb2t\") pod \"manila-operator-controller-manager-78c6999f6f-bbb7n\" (UID: \"2106e351-4841-4ab5-84eb-745af2cb3379\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.530939 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.532466 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.538012 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-m8qnz" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.540446 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwrds\" (UniqueName: \"kubernetes.io/projected/434f69b5-0d70-418d-aa5e-04e307a5399c-kube-api-access-qwrds\") pod \"ironic-operator-controller-manager-598f7747c9-z46t2\" (UID: \"434f69b5-0d70-418d-aa5e-04e307a5399c\") " pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.554033 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.600865 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78d58447c5-cl46k"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.615927 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkdqj\" (UniqueName: \"kubernetes.io/projected/aa23290f-1702-4c63-92c7-047d18922df9-kube-api-access-wkdqj\") pod \"mariadb-operator-controller-manager-6b9fb5fdcb-pm47m\" (UID: \"aa23290f-1702-4c63-92c7-047d18922df9\") " pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.616017 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zb2t\" (UniqueName: \"kubernetes.io/projected/2106e351-4841-4ab5-84eb-745af2cb3379-kube-api-access-6zb2t\") pod \"manila-operator-controller-manager-78c6999f6f-bbb7n\" (UID: \"2106e351-4841-4ab5-84eb-745af2cb3379\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.616096 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbwjp\" (UniqueName: \"kubernetes.io/projected/337c06ec-4c42-41a1-8faa-60338d4eeddc-kube-api-access-xbwjp\") pod \"keystone-operator-controller-manager-b8b6d4659-88b5k\" (UID: \"337c06ec-4c42-41a1-8faa-60338d4eeddc\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.625014 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-cl46k" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.637664 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-c8f2z" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.648580 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zb2t\" (UniqueName: \"kubernetes.io/projected/2106e351-4841-4ab5-84eb-745af2cb3379-kube-api-access-6zb2t\") pod \"manila-operator-controller-manager-78c6999f6f-bbb7n\" (UID: \"2106e351-4841-4ab5-84eb-745af2cb3379\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.653203 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkdqj\" (UniqueName: \"kubernetes.io/projected/aa23290f-1702-4c63-92c7-047d18922df9-kube-api-access-wkdqj\") pod \"mariadb-operator-controller-manager-6b9fb5fdcb-pm47m\" (UID: \"aa23290f-1702-4c63-92c7-047d18922df9\") " pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.697950 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.699426 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.701170 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-6r8gx" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.717953 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztt4f\" (UniqueName: \"kubernetes.io/projected/1b84deae-93ba-48f2-88b2-583025b41dc0-kube-api-access-ztt4f\") pod \"neutron-operator-controller-manager-78d58447c5-cl46k\" (UID: \"1b84deae-93ba-48f2-88b2-583025b41dc0\") " pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-cl46k" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.718024 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbwjp\" (UniqueName: \"kubernetes.io/projected/337c06ec-4c42-41a1-8faa-60338d4eeddc-kube-api-access-xbwjp\") pod \"keystone-operator-controller-manager-b8b6d4659-88b5k\" (UID: \"337c06ec-4c42-41a1-8faa-60338d4eeddc\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.718046 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhgvc\" (UniqueName: \"kubernetes.io/projected/4533e492-2631-4c22-af2f-6bec08b23280-kube-api-access-zhgvc\") pod \"nova-operator-controller-manager-7bdb645866-qpltb\" (UID: \"4533e492-2631-4c22-af2f-6bec08b23280\") " pod="openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.748533 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.759455 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.765696 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbwjp\" (UniqueName: \"kubernetes.io/projected/337c06ec-4c42-41a1-8faa-60338d4eeddc-kube-api-access-xbwjp\") pod \"keystone-operator-controller-manager-b8b6d4659-88b5k\" (UID: \"337c06ec-4c42-41a1-8faa-60338d4eeddc\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.776368 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.797095 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.798768 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.801902 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-6zkq8" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.812322 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78d58447c5-cl46k"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.830541 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2d72m\" (UniqueName: \"kubernetes.io/projected/cc059705-cab0-43ef-b078-34509b901591-kube-api-access-2d72m\") pod \"octavia-operator-controller-manager-5f4cd88d46-bdsdg\" (UID: \"cc059705-cab0-43ef-b078-34509b901591\") " pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.830945 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztt4f\" (UniqueName: \"kubernetes.io/projected/1b84deae-93ba-48f2-88b2-583025b41dc0-kube-api-access-ztt4f\") pod \"neutron-operator-controller-manager-78d58447c5-cl46k\" (UID: \"1b84deae-93ba-48f2-88b2-583025b41dc0\") " pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-cl46k" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.831109 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhgvc\" (UniqueName: \"kubernetes.io/projected/4533e492-2631-4c22-af2f-6bec08b23280-kube-api-access-zhgvc\") pod \"nova-operator-controller-manager-7bdb645866-qpltb\" (UID: \"4533e492-2631-4c22-af2f-6bec08b23280\") " pod="openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.844030 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.852306 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6f75f45d54-vcxlv"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.853707 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-vcxlv" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.861132 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-d98pf" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.869277 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhgvc\" (UniqueName: \"kubernetes.io/projected/4533e492-2631-4c22-af2f-6bec08b23280-kube-api-access-zhgvc\") pod \"nova-operator-controller-manager-7bdb645866-qpltb\" (UID: \"4533e492-2631-4c22-af2f-6bec08b23280\") " pod="openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.870702 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.888223 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztt4f\" (UniqueName: \"kubernetes.io/projected/1b84deae-93ba-48f2-88b2-583025b41dc0-kube-api-access-ztt4f\") pod \"neutron-operator-controller-manager-78d58447c5-cl46k\" (UID: \"1b84deae-93ba-48f2-88b2-583025b41dc0\") " pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-cl46k" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.891194 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.892853 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.895976 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-2tpvn" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.896073 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.902759 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6f75f45d54-vcxlv"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.911704 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.912772 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.917380 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.918953 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.925809 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-wj5lg" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.934219 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bh6xp\" (UniqueName: \"kubernetes.io/projected/2e00aa5f-0d94-48bb-9802-cfff5c46490f-kube-api-access-bh6xp\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb\" (UID: \"2e00aa5f-0d94-48bb-9802-cfff5c46490f\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.934325 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2d72m\" (UniqueName: \"kubernetes.io/projected/cc059705-cab0-43ef-b078-34509b901591-kube-api-access-2d72m\") pod \"octavia-operator-controller-manager-5f4cd88d46-bdsdg\" (UID: \"cc059705-cab0-43ef-b078-34509b901591\") " pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.934425 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2mkw\" (UniqueName: \"kubernetes.io/projected/dc576625-1984-4bcf-9c11-8dfbe037d0a1-kube-api-access-r2mkw\") pod \"ovn-operator-controller-manager-6f75f45d54-vcxlv\" (UID: \"dc576625-1984-4bcf-9c11-8dfbe037d0a1\") " pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-vcxlv" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.934573 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert\") pod \"infra-operator-controller-manager-694cf4f878-zsqn6\" (UID: \"6fdf0399-314b-40df-96f2-c27008769f71\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.934631 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb\" (UID: \"2e00aa5f-0d94-48bb-9802-cfff5c46490f\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" Jan 28 16:55:10 crc kubenswrapper[4877]: E0128 16:55:10.934755 4877 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 16:55:10 crc kubenswrapper[4877]: E0128 16:55:10.934907 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert podName:6fdf0399-314b-40df-96f2-c27008769f71 nodeName:}" failed. 
No retries permitted until 2026-01-28 16:55:11.934873113 +0000 UTC m=+1215.493200161 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert") pod "infra-operator-controller-manager-694cf4f878-zsqn6" (UID: "6fdf0399-314b-40df-96f2-c27008769f71") : secret "infra-operator-webhook-server-cert" not found Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.946448 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-p88fg"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.948202 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-p88fg" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.954188 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-ghxz6" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.957168 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.970259 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2d72m\" (UniqueName: \"kubernetes.io/projected/cc059705-cab0-43ef-b078-34509b901591-kube-api-access-2d72m\") pod \"octavia-operator-controller-manager-5f4cd88d46-bdsdg\" (UID: \"cc059705-cab0-43ef-b078-34509b901591\") " pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.975584 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb"] Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.977355 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb" Jan 28 16:55:10 crc kubenswrapper[4877]: I0128 16:55:10.996203 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-cl46k" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.018614 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.024101 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6469dc96d7-np95m"] Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.025279 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-6469dc96d7-np95m" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.036690 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzlk5\" (UniqueName: \"kubernetes.io/projected/a18cdf94-0fd1-491c-8213-f2bd11b787e2-kube-api-access-bzlk5\") pod \"placement-operator-controller-manager-79d5ccc684-kljnm\" (UID: \"a18cdf94-0fd1-491c-8213-f2bd11b787e2\") " pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.036754 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb\" (UID: \"2e00aa5f-0d94-48bb-9802-cfff5c46490f\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.036836 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64bk2\" (UniqueName: \"kubernetes.io/projected/ddffc31e-38aa-45c6-bad8-5787adf8c7fe-kube-api-access-64bk2\") pod \"swift-operator-controller-manager-547cbdb99f-p88fg\" (UID: \"ddffc31e-38aa-45c6-bad8-5787adf8c7fe\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-p88fg" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.036862 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bh6xp\" (UniqueName: \"kubernetes.io/projected/2e00aa5f-0d94-48bb-9802-cfff5c46490f-kube-api-access-bh6xp\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb\" (UID: \"2e00aa5f-0d94-48bb-9802-cfff5c46490f\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.036970 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2mkw\" (UniqueName: \"kubernetes.io/projected/dc576625-1984-4bcf-9c11-8dfbe037d0a1-kube-api-access-r2mkw\") pod \"ovn-operator-controller-manager-6f75f45d54-vcxlv\" (UID: \"dc576625-1984-4bcf-9c11-8dfbe037d0a1\") " pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-vcxlv" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.039288 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-p88fg"] Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.052039 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-drdwg" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.062560 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2mkw\" (UniqueName: \"kubernetes.io/projected/dc576625-1984-4bcf-9c11-8dfbe037d0a1-kube-api-access-r2mkw\") pod \"ovn-operator-controller-manager-6f75f45d54-vcxlv\" (UID: \"dc576625-1984-4bcf-9c11-8dfbe037d0a1\") " pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-vcxlv" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.072904 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6469dc96d7-np95m"] Jan 28 16:55:11 crc kubenswrapper[4877]: E0128 
16:55:11.082658 4877 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 16:55:11 crc kubenswrapper[4877]: E0128 16:55:11.082736 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert podName:2e00aa5f-0d94-48bb-9802-cfff5c46490f nodeName:}" failed. No retries permitted until 2026-01-28 16:55:11.58271529 +0000 UTC m=+1215.141042178 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" (UID: "2e00aa5f-0d94-48bb-9802-cfff5c46490f") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.099291 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm"] Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.100843 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.106189 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-tfzf4" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.122096 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bh6xp\" (UniqueName: \"kubernetes.io/projected/2e00aa5f-0d94-48bb-9802-cfff5c46490f-kube-api-access-bh6xp\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb\" (UID: \"2e00aa5f-0d94-48bb-9802-cfff5c46490f\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.124283 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm"] Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.139596 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzlk5\" (UniqueName: \"kubernetes.io/projected/a18cdf94-0fd1-491c-8213-f2bd11b787e2-kube-api-access-bzlk5\") pod \"placement-operator-controller-manager-79d5ccc684-kljnm\" (UID: \"a18cdf94-0fd1-491c-8213-f2bd11b787e2\") " pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.139705 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzg5v\" (UniqueName: \"kubernetes.io/projected/f083a8cb-517a-40eb-8cb7-6cf7ce24fe7c-kube-api-access-xzg5v\") pod \"telemetry-operator-controller-manager-6469dc96d7-np95m\" (UID: \"f083a8cb-517a-40eb-8cb7-6cf7ce24fe7c\") " pod="openstack-operators/telemetry-operator-controller-manager-6469dc96d7-np95m" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.139737 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64bk2\" (UniqueName: \"kubernetes.io/projected/ddffc31e-38aa-45c6-bad8-5787adf8c7fe-kube-api-access-64bk2\") pod \"swift-operator-controller-manager-547cbdb99f-p88fg\" (UID: \"ddffc31e-38aa-45c6-bad8-5787adf8c7fe\") " 
pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-p88fg" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.140056 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6mhf\" (UniqueName: \"kubernetes.io/projected/f47bead5-fd76-4061-8ca4-51ed7bf2d97d-kube-api-access-p6mhf\") pod \"test-operator-controller-manager-69797bbcbd-2zpzm\" (UID: \"f47bead5-fd76-4061-8ca4-51ed7bf2d97d\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.146987 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-564965969-5ndmd"] Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.149086 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-564965969-5ndmd" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.152170 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-2nvms" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.158699 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzlk5\" (UniqueName: \"kubernetes.io/projected/a18cdf94-0fd1-491c-8213-f2bd11b787e2-kube-api-access-bzlk5\") pod \"placement-operator-controller-manager-79d5ccc684-kljnm\" (UID: \"a18cdf94-0fd1-491c-8213-f2bd11b787e2\") " pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.161773 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64bk2\" (UniqueName: \"kubernetes.io/projected/ddffc31e-38aa-45c6-bad8-5787adf8c7fe-kube-api-access-64bk2\") pod \"swift-operator-controller-manager-547cbdb99f-p88fg\" (UID: \"ddffc31e-38aa-45c6-bad8-5787adf8c7fe\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-p88fg" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.171843 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-564965969-5ndmd"] Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.174838 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-p88fg" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.225594 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf"] Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.227743 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.232064 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.232220 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-87qkv" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.232512 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.237386 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf"] Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.243144 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7vzb\" (UniqueName: \"kubernetes.io/projected/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-kube-api-access-q7vzb\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.243278 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.243392 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzg5v\" (UniqueName: \"kubernetes.io/projected/f083a8cb-517a-40eb-8cb7-6cf7ce24fe7c-kube-api-access-xzg5v\") pod \"telemetry-operator-controller-manager-6469dc96d7-np95m\" (UID: \"f083a8cb-517a-40eb-8cb7-6cf7ce24fe7c\") " pod="openstack-operators/telemetry-operator-controller-manager-6469dc96d7-np95m" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.243439 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.243606 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgxsv\" (UniqueName: \"kubernetes.io/projected/c6112d71-edde-4615-9f4d-1c59cf38702d-kube-api-access-vgxsv\") pod \"watcher-operator-controller-manager-564965969-5ndmd\" (UID: \"c6112d71-edde-4615-9f4d-1c59cf38702d\") " pod="openstack-operators/watcher-operator-controller-manager-564965969-5ndmd" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.243788 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6mhf\" (UniqueName: \"kubernetes.io/projected/f47bead5-fd76-4061-8ca4-51ed7bf2d97d-kube-api-access-p6mhf\") pod \"test-operator-controller-manager-69797bbcbd-2zpzm\" (UID: 
\"f47bead5-fd76-4061-8ca4-51ed7bf2d97d\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.257974 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kh2w4"] Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.259876 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kh2w4" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.265044 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kh2w4"] Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.267249 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-2lg2m" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.274246 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6mhf\" (UniqueName: \"kubernetes.io/projected/f47bead5-fd76-4061-8ca4-51ed7bf2d97d-kube-api-access-p6mhf\") pod \"test-operator-controller-manager-69797bbcbd-2zpzm\" (UID: \"f47bead5-fd76-4061-8ca4-51ed7bf2d97d\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.291332 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzg5v\" (UniqueName: \"kubernetes.io/projected/f083a8cb-517a-40eb-8cb7-6cf7ce24fe7c-kube-api-access-xzg5v\") pod \"telemetry-operator-controller-manager-6469dc96d7-np95m\" (UID: \"f083a8cb-517a-40eb-8cb7-6cf7ce24fe7c\") " pod="openstack-operators/telemetry-operator-controller-manager-6469dc96d7-np95m" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.315406 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-vcxlv" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.362843 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7vzb\" (UniqueName: \"kubernetes.io/projected/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-kube-api-access-q7vzb\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.363257 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.363785 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.365046 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:11 crc kubenswrapper[4877]: E0128 16:55:11.367030 4877 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 16:55:11 crc kubenswrapper[4877]: E0128 16:55:11.367133 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs podName:8df1e028-d1c7-4b68-b63a-8cc8e762b59d nodeName:}" failed. No retries permitted until 2026-01-28 16:55:11.867107065 +0000 UTC m=+1215.425433943 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs") pod "openstack-operator-controller-manager-659b944486-tdjwf" (UID: "8df1e028-d1c7-4b68-b63a-8cc8e762b59d") : secret "webhook-server-cert" not found Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.367502 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgxsv\" (UniqueName: \"kubernetes.io/projected/c6112d71-edde-4615-9f4d-1c59cf38702d-kube-api-access-vgxsv\") pod \"watcher-operator-controller-manager-564965969-5ndmd\" (UID: \"c6112d71-edde-4615-9f4d-1c59cf38702d\") " pod="openstack-operators/watcher-operator-controller-manager-564965969-5ndmd" Jan 28 16:55:11 crc kubenswrapper[4877]: E0128 16:55:11.367579 4877 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 16:55:11 crc kubenswrapper[4877]: E0128 16:55:11.368334 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs podName:8df1e028-d1c7-4b68-b63a-8cc8e762b59d nodeName:}" failed. No retries permitted until 2026-01-28 16:55:11.868297126 +0000 UTC m=+1215.426624014 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs") pod "openstack-operator-controller-manager-659b944486-tdjwf" (UID: "8df1e028-d1c7-4b68-b63a-8cc8e762b59d") : secret "metrics-server-cert" not found Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.404385 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgxsv\" (UniqueName: \"kubernetes.io/projected/c6112d71-edde-4615-9f4d-1c59cf38702d-kube-api-access-vgxsv\") pod \"watcher-operator-controller-manager-564965969-5ndmd\" (UID: \"c6112d71-edde-4615-9f4d-1c59cf38702d\") " pod="openstack-operators/watcher-operator-controller-manager-564965969-5ndmd" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.408449 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7vzb\" (UniqueName: \"kubernetes.io/projected/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-kube-api-access-q7vzb\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.470523 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmrzz\" (UniqueName: \"kubernetes.io/projected/bf67fad9-2203-4da5-a976-c1b77f627d32-kube-api-access-rmrzz\") pod \"rabbitmq-cluster-operator-manager-668c99d594-kh2w4\" (UID: \"bf67fad9-2203-4da5-a976-c1b77f627d32\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kh2w4" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.494291 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-6469dc96d7-np95m" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.514150 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.534628 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-564965969-5ndmd" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.573563 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmrzz\" (UniqueName: \"kubernetes.io/projected/bf67fad9-2203-4da5-a976-c1b77f627d32-kube-api-access-rmrzz\") pod \"rabbitmq-cluster-operator-manager-668c99d594-kh2w4\" (UID: \"bf67fad9-2203-4da5-a976-c1b77f627d32\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kh2w4" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.596544 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmrzz\" (UniqueName: \"kubernetes.io/projected/bf67fad9-2203-4da5-a976-c1b77f627d32-kube-api-access-rmrzz\") pod \"rabbitmq-cluster-operator-manager-668c99d594-kh2w4\" (UID: \"bf67fad9-2203-4da5-a976-c1b77f627d32\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kh2w4" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.657364 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kh2w4" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.676231 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb\" (UID: \"2e00aa5f-0d94-48bb-9802-cfff5c46490f\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" Jan 28 16:55:11 crc kubenswrapper[4877]: E0128 16:55:11.676568 4877 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 16:55:11 crc kubenswrapper[4877]: E0128 16:55:11.676644 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert podName:2e00aa5f-0d94-48bb-9802-cfff5c46490f nodeName:}" failed. No retries permitted until 2026-01-28 16:55:12.676619622 +0000 UTC m=+1216.234946510 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" (UID: "2e00aa5f-0d94-48bb-9802-cfff5c46490f") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.861979 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb"] Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.869693 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7f86f8796f-4xxbg"] Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.878176 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-pvxjd"] Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.880665 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.880794 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:11 crc kubenswrapper[4877]: E0128 16:55:11.880841 4877 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 16:55:11 crc kubenswrapper[4877]: E0128 16:55:11.880908 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs podName:8df1e028-d1c7-4b68-b63a-8cc8e762b59d nodeName:}" failed. No retries permitted until 2026-01-28 16:55:12.880884405 +0000 UTC m=+1216.439211293 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs") pod "openstack-operator-controller-manager-659b944486-tdjwf" (UID: "8df1e028-d1c7-4b68-b63a-8cc8e762b59d") : secret "webhook-server-cert" not found Jan 28 16:55:11 crc kubenswrapper[4877]: E0128 16:55:11.881161 4877 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 16:55:11 crc kubenswrapper[4877]: E0128 16:55:11.881268 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs podName:8df1e028-d1c7-4b68-b63a-8cc8e762b59d nodeName:}" failed. No retries permitted until 2026-01-28 16:55:12.881237834 +0000 UTC m=+1216.439564792 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs") pod "openstack-operator-controller-manager-659b944486-tdjwf" (UID: "8df1e028-d1c7-4b68-b63a-8cc8e762b59d") : secret "metrics-server-cert" not found Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.982661 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert\") pod \"infra-operator-controller-manager-694cf4f878-zsqn6\" (UID: \"6fdf0399-314b-40df-96f2-c27008769f71\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6" Jan 28 16:55:11 crc kubenswrapper[4877]: E0128 16:55:11.982854 4877 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 16:55:11 crc kubenswrapper[4877]: E0128 16:55:11.982913 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert podName:6fdf0399-314b-40df-96f2-c27008769f71 nodeName:}" failed. No retries permitted until 2026-01-28 16:55:13.982891494 +0000 UTC m=+1217.541218382 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert") pod "infra-operator-controller-manager-694cf4f878-zsqn6" (UID: "6fdf0399-314b-40df-96f2-c27008769f71") : secret "infra-operator-webhook-server-cert" not found Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.988821 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb" event={"ID":"e068371c-e59c-4e57-8fd3-a55470f67063","Type":"ContainerStarted","Data":"07a58d466240793f4c334135dc1ecdc920aa6c1f9669af4c83e7bc82f135ca82"} Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.991664 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-4xxbg" event={"ID":"f24bb145-227c-43be-b63a-d606c168241b","Type":"ContainerStarted","Data":"558b664b23349ca0bcc2cc4b3fe0c5123267d7c509ce0ec952ae173aa6192a1e"} Jan 28 16:55:11 crc kubenswrapper[4877]: I0128 16:55:11.993252 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-pvxjd" event={"ID":"c2d80875-c32f-4596-a44d-6a4b9d524304","Type":"ContainerStarted","Data":"de0ca5bab0dbd6a1738a57435a43472aef3855fdc8af213f326456f5cb414198"} Jan 28 16:55:12 crc kubenswrapper[4877]: W0128 16:55:12.231138 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod55abdd00_6b2b_44a2_ae22_bae3fbb12282.slice/crio-648731e5a74bc80dc7fe08d4d50e37e1f49458a31904f5d396b83a90b3eacc05 WatchSource:0}: Error finding container 648731e5a74bc80dc7fe08d4d50e37e1f49458a31904f5d396b83a90b3eacc05: Status 404 returned error can't find the container with id 648731e5a74bc80dc7fe08d4d50e37e1f49458a31904f5d396b83a90b3eacc05 Jan 28 16:55:12 crc kubenswrapper[4877]: I0128 16:55:12.242394 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j"] Jan 28 16:55:12 crc kubenswrapper[4877]: I0128 16:55:12.255334 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-gxvhn"] Jan 28 16:55:12 crc kubenswrapper[4877]: I0128 16:55:12.266873 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-vsnlm"] Jan 28 16:55:12 crc kubenswrapper[4877]: I0128 16:55:12.667051 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2"] Jan 28 16:55:12 crc kubenswrapper[4877]: I0128 16:55:12.686413 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m"] Jan 28 16:55:12 crc kubenswrapper[4877]: I0128 16:55:12.699220 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb\" (UID: \"2e00aa5f-0d94-48bb-9802-cfff5c46490f\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" Jan 28 16:55:12 crc kubenswrapper[4877]: E0128 16:55:12.699571 4877 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 
16:55:12 crc kubenswrapper[4877]: E0128 16:55:12.699650 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert podName:2e00aa5f-0d94-48bb-9802-cfff5c46490f nodeName:}" failed. No retries permitted until 2026-01-28 16:55:14.699624503 +0000 UTC m=+1218.257951391 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" (UID: "2e00aa5f-0d94-48bb-9802-cfff5c46490f") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 16:55:12 crc kubenswrapper[4877]: I0128 16:55:12.744853 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78d58447c5-cl46k"] Jan 28 16:55:12 crc kubenswrapper[4877]: I0128 16:55:12.762590 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k"] Jan 28 16:55:12 crc kubenswrapper[4877]: W0128 16:55:12.763622 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaa23290f_1702_4c63_92c7_047d18922df9.slice/crio-9d8156fb0980baabc9ad647c58b8d706dcc8bea170bb80652b617a65de274750 WatchSource:0}: Error finding container 9d8156fb0980baabc9ad647c58b8d706dcc8bea170bb80652b617a65de274750: Status 404 returned error can't find the container with id 9d8156fb0980baabc9ad647c58b8d706dcc8bea170bb80652b617a65de274750 Jan 28 16:55:12 crc kubenswrapper[4877]: I0128 16:55:12.781178 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n"] Jan 28 16:55:12 crc kubenswrapper[4877]: W0128 16:55:12.796904 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod337c06ec_4c42_41a1_8faa_60338d4eeddc.slice/crio-23096881c7121adb0285dc204ca8ef4c9a747ff74b12f6751567e3acb3ab7d3a WatchSource:0}: Error finding container 23096881c7121adb0285dc204ca8ef4c9a747ff74b12f6751567e3acb3ab7d3a: Status 404 returned error can't find the container with id 23096881c7121adb0285dc204ca8ef4c9a747ff74b12f6751567e3acb3ab7d3a Jan 28 16:55:12 crc kubenswrapper[4877]: I0128 16:55:12.797059 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-p88fg"] Jan 28 16:55:12 crc kubenswrapper[4877]: I0128 16:55:12.836815 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg"] Jan 28 16:55:12 crc kubenswrapper[4877]: I0128 16:55:12.854449 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb"] Jan 28 16:55:12 crc kubenswrapper[4877]: I0128 16:55:12.906039 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:12 crc kubenswrapper[4877]: I0128 16:55:12.906175 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:12 crc kubenswrapper[4877]: E0128 16:55:12.906344 4877 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 16:55:12 crc kubenswrapper[4877]: E0128 16:55:12.906447 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs podName:8df1e028-d1c7-4b68-b63a-8cc8e762b59d nodeName:}" failed. No retries permitted until 2026-01-28 16:55:14.906422362 +0000 UTC m=+1218.464749250 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs") pod "openstack-operator-controller-manager-659b944486-tdjwf" (UID: "8df1e028-d1c7-4b68-b63a-8cc8e762b59d") : secret "webhook-server-cert" not found Jan 28 16:55:12 crc kubenswrapper[4877]: E0128 16:55:12.906364 4877 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 16:55:12 crc kubenswrapper[4877]: E0128 16:55:12.906532 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs podName:8df1e028-d1c7-4b68-b63a-8cc8e762b59d nodeName:}" failed. No retries permitted until 2026-01-28 16:55:14.906510525 +0000 UTC m=+1218.464837413 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs") pod "openstack-operator-controller-manager-659b944486-tdjwf" (UID: "8df1e028-d1c7-4b68-b63a-8cc8e762b59d") : secret "metrics-server-cert" not found Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.144751 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-gxvhn" event={"ID":"0e3ad19a-2dfd-4238-9bc5-1f2741b3adc0","Type":"ContainerStarted","Data":"67bd2d6bbd67e786f9e2b79373dcaffb18f6c449281bc1cb778079560ce048ab"} Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.164867 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j" event={"ID":"55abdd00-6b2b-44a2-ae22-bae3fbb12282","Type":"ContainerStarted","Data":"648731e5a74bc80dc7fe08d4d50e37e1f49458a31904f5d396b83a90b3eacc05"} Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.193943 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-cl46k" event={"ID":"1b84deae-93ba-48f2-88b2-583025b41dc0","Type":"ContainerStarted","Data":"38b0cd9f0873584bd6a8a81dee84f7477a9b941afae1ad9c2f26c28e71f495f6"} Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.214814 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-p88fg" event={"ID":"ddffc31e-38aa-45c6-bad8-5787adf8c7fe","Type":"ContainerStarted","Data":"be7476e14b8c3c0bb5d8713b3732c142bb4333b82b9d7f6dda53a7925617b533"} Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.252007 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb" event={"ID":"4533e492-2631-4c22-af2f-6bec08b23280","Type":"ContainerStarted","Data":"d804d32b349ec355fb9dffd1a9e4b04b674e7a47d89f728d6c730fecdc10a6c8"} Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.297377 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm"] Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.306464 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2" event={"ID":"434f69b5-0d70-418d-aa5e-04e307a5399c","Type":"ContainerStarted","Data":"59b38dfae061890d2bc76e3e764c2c42e6cb814f6cd97d3777281b3c3770f86a"} Jan 28 16:55:13 crc kubenswrapper[4877]: W0128 16:55:13.333109 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda18cdf94_0fd1_491c_8213_f2bd11b787e2.slice/crio-6926c29fa5a515b9a2a0009b60b425846c6607b89bfd0f916d7796b2374758ca WatchSource:0}: Error finding container 6926c29fa5a515b9a2a0009b60b425846c6607b89bfd0f916d7796b2374758ca: Status 404 returned error can't find the container with id 6926c29fa5a515b9a2a0009b60b425846c6607b89bfd0f916d7796b2374758ca Jan 28 16:55:13 crc kubenswrapper[4877]: E0128 16:55:13.404692 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:7869203f6f97de780368d507636031090fed3b658d2f7771acbd4481bdfc870b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vgxsv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-564965969-5ndmd_openstack-operators(c6112d71-edde-4615-9f4d-1c59cf38702d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 28 16:55:13 crc kubenswrapper[4877]: E0128 16:55:13.404744 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:013c0ad82d21a21c7eece5cd4b5d5c4b8eb410b6671ac33a6f3fb78c8510811d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bzlk5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-79d5ccc684-kljnm_openstack-operators(a18cdf94-0fd1-491c-8213-f2bd11b787e2): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 28 16:55:13 crc 
kubenswrapper[4877]: E0128 16:55:13.407595 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm" podUID="a18cdf94-0fd1-491c-8213-f2bd11b787e2" Jan 28 16:55:13 crc kubenswrapper[4877]: E0128 16:55:13.407692 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-564965969-5ndmd" podUID="c6112d71-edde-4615-9f4d-1c59cf38702d" Jan 28 16:55:13 crc kubenswrapper[4877]: E0128 16:55:13.412304 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rmrzz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-kh2w4_openstack-operators(bf67fad9-2203-4da5-a976-c1b77f627d32): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 28 16:55:13 crc kubenswrapper[4877]: E0128 16:55:13.413771 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kh2w4" podUID="bf67fad9-2203-4da5-a976-c1b77f627d32" Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.416131 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6469dc96d7-np95m"] Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.416330 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg" event={"ID":"cc059705-cab0-43ef-b078-34509b901591","Type":"ContainerStarted","Data":"b3d09a054077418c58f421a9f51030e2174538722b1b2af4b28dd492c145550d"} Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.416614 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m" event={"ID":"aa23290f-1702-4c63-92c7-047d18922df9","Type":"ContainerStarted","Data":"9d8156fb0980baabc9ad647c58b8d706dcc8bea170bb80652b617a65de274750"} Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.416702 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm"] Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.419145 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kh2w4"] Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.419209 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n" event={"ID":"2106e351-4841-4ab5-84eb-745af2cb3379","Type":"ContainerStarted","Data":"0a117076980056d9d18a5a4ff00dcbfd10d87bd1fd60b669b23152b89cf21558"} Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.425457 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-vsnlm" event={"ID":"970f0fe7-15e4-4fcf-bca0-eb07b26ba94a","Type":"ContainerStarted","Data":"9ca3ad992a2c6c5c64098004990b11e83429f1bc439a93fc200fa8f890c47904"} Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.433144 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k" event={"ID":"337c06ec-4c42-41a1-8faa-60338d4eeddc","Type":"ContainerStarted","Data":"23096881c7121adb0285dc204ca8ef4c9a747ff74b12f6751567e3acb3ab7d3a"} Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.440176 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6f75f45d54-vcxlv"] Jan 28 16:55:13 crc kubenswrapper[4877]: I0128 16:55:13.451862 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-564965969-5ndmd"] Jan 28 16:55:14 crc kubenswrapper[4877]: I0128 16:55:14.006855 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert\") pod \"infra-operator-controller-manager-694cf4f878-zsqn6\" (UID: \"6fdf0399-314b-40df-96f2-c27008769f71\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6" Jan 28 16:55:14 crc kubenswrapper[4877]: E0128 16:55:14.007153 4877 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 16:55:14 crc kubenswrapper[4877]: E0128 16:55:14.007219 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert podName:6fdf0399-314b-40df-96f2-c27008769f71 nodeName:}" failed. No retries permitted until 2026-01-28 16:55:18.007205034 +0000 UTC m=+1221.565531912 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert") pod "infra-operator-controller-manager-694cf4f878-zsqn6" (UID: "6fdf0399-314b-40df-96f2-c27008769f71") : secret "infra-operator-webhook-server-cert" not found Jan 28 16:55:14 crc kubenswrapper[4877]: I0128 16:55:14.489812 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-vcxlv" event={"ID":"dc576625-1984-4bcf-9c11-8dfbe037d0a1","Type":"ContainerStarted","Data":"0e55f745bacb28741966065ee6913b2e18821800394c6f402762884a693be87b"} Jan 28 16:55:14 crc kubenswrapper[4877]: I0128 16:55:14.499724 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6469dc96d7-np95m" event={"ID":"f083a8cb-517a-40eb-8cb7-6cf7ce24fe7c","Type":"ContainerStarted","Data":"6503c0aa08b31409df2cf87226f8ed522211387e75b2019954ea554df5ce4831"} Jan 28 16:55:14 crc kubenswrapper[4877]: I0128 16:55:14.513509 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-564965969-5ndmd" event={"ID":"c6112d71-edde-4615-9f4d-1c59cf38702d","Type":"ContainerStarted","Data":"c8b8141275236de2500c930913748e2d360335eadbcd73a2fce0617c4fbecc43"} Jan 28 16:55:14 crc kubenswrapper[4877]: E0128 16:55:14.525054 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:7869203f6f97de780368d507636031090fed3b658d2f7771acbd4481bdfc870b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-564965969-5ndmd" podUID="c6112d71-edde-4615-9f4d-1c59cf38702d" Jan 28 16:55:14 crc kubenswrapper[4877]: I0128 16:55:14.530238 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kh2w4" event={"ID":"bf67fad9-2203-4da5-a976-c1b77f627d32","Type":"ContainerStarted","Data":"bff19df7e9ebf2ade40cbc4ab85b5673fc50ded56952c68e20ec7875b66e606d"} Jan 28 16:55:14 crc kubenswrapper[4877]: I0128 16:55:14.545516 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm" event={"ID":"f47bead5-fd76-4061-8ca4-51ed7bf2d97d","Type":"ContainerStarted","Data":"55c9615cf72a33169b1bf0ef37467e380553fe913982550a645cdc73d42cc9c2"} Jan 28 16:55:14 crc kubenswrapper[4877]: E0128 16:55:14.549608 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kh2w4" podUID="bf67fad9-2203-4da5-a976-c1b77f627d32" Jan 28 16:55:14 crc kubenswrapper[4877]: I0128 16:55:14.566441 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm" event={"ID":"a18cdf94-0fd1-491c-8213-f2bd11b787e2","Type":"ContainerStarted","Data":"6926c29fa5a515b9a2a0009b60b425846c6607b89bfd0f916d7796b2374758ca"} Jan 28 16:55:14 crc kubenswrapper[4877]: E0128 16:55:14.569473 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/placement-operator@sha256:013c0ad82d21a21c7eece5cd4b5d5c4b8eb410b6671ac33a6f3fb78c8510811d\\\"\"" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm" podUID="a18cdf94-0fd1-491c-8213-f2bd11b787e2" Jan 28 16:55:14 crc kubenswrapper[4877]: I0128 16:55:14.723865 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb\" (UID: \"2e00aa5f-0d94-48bb-9802-cfff5c46490f\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" Jan 28 16:55:14 crc kubenswrapper[4877]: E0128 16:55:14.724201 4877 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 16:55:14 crc kubenswrapper[4877]: E0128 16:55:14.724280 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert podName:2e00aa5f-0d94-48bb-9802-cfff5c46490f nodeName:}" failed. No retries permitted until 2026-01-28 16:55:18.724264792 +0000 UTC m=+1222.282591680 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" (UID: "2e00aa5f-0d94-48bb-9802-cfff5c46490f") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 16:55:14 crc kubenswrapper[4877]: I0128 16:55:14.927532 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:14 crc kubenswrapper[4877]: I0128 16:55:14.927653 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:14 crc kubenswrapper[4877]: E0128 16:55:14.927732 4877 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 16:55:14 crc kubenswrapper[4877]: E0128 16:55:14.927822 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs podName:8df1e028-d1c7-4b68-b63a-8cc8e762b59d nodeName:}" failed. No retries permitted until 2026-01-28 16:55:18.927798985 +0000 UTC m=+1222.486125863 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs") pod "openstack-operator-controller-manager-659b944486-tdjwf" (UID: "8df1e028-d1c7-4b68-b63a-8cc8e762b59d") : secret "webhook-server-cert" not found Jan 28 16:55:14 crc kubenswrapper[4877]: E0128 16:55:14.927893 4877 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 16:55:14 crc kubenswrapper[4877]: E0128 16:55:14.928007 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs podName:8df1e028-d1c7-4b68-b63a-8cc8e762b59d nodeName:}" failed. No retries permitted until 2026-01-28 16:55:18.92798188 +0000 UTC m=+1222.486308768 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs") pod "openstack-operator-controller-manager-659b944486-tdjwf" (UID: "8df1e028-d1c7-4b68-b63a-8cc8e762b59d") : secret "metrics-server-cert" not found Jan 28 16:55:15 crc kubenswrapper[4877]: E0128 16:55:15.585011 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kh2w4" podUID="bf67fad9-2203-4da5-a976-c1b77f627d32" Jan 28 16:55:15 crc kubenswrapper[4877]: E0128 16:55:15.585415 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:7869203f6f97de780368d507636031090fed3b658d2f7771acbd4481bdfc870b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-564965969-5ndmd" podUID="c6112d71-edde-4615-9f4d-1c59cf38702d" Jan 28 16:55:15 crc kubenswrapper[4877]: E0128 16:55:15.587656 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:013c0ad82d21a21c7eece5cd4b5d5c4b8eb410b6671ac33a6f3fb78c8510811d\\\"\"" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm" podUID="a18cdf94-0fd1-491c-8213-f2bd11b787e2" Jan 28 16:55:18 crc kubenswrapper[4877]: I0128 16:55:18.020704 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert\") pod \"infra-operator-controller-manager-694cf4f878-zsqn6\" (UID: \"6fdf0399-314b-40df-96f2-c27008769f71\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6" Jan 28 16:55:18 crc kubenswrapper[4877]: E0128 16:55:18.021430 4877 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 16:55:18 crc kubenswrapper[4877]: E0128 16:55:18.021506 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert podName:6fdf0399-314b-40df-96f2-c27008769f71 nodeName:}" failed. 
Jan 28 16:55:18 crc kubenswrapper[4877]: E0128 16:55:18.021506 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert podName:6fdf0399-314b-40df-96f2-c27008769f71 nodeName:}" failed. No retries permitted until 2026-01-28 16:55:26.021468799 +0000 UTC m=+1229.579795677 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert") pod "infra-operator-controller-manager-694cf4f878-zsqn6" (UID: "6fdf0399-314b-40df-96f2-c27008769f71") : secret "infra-operator-webhook-server-cert" not found
Jan 28 16:55:18 crc kubenswrapper[4877]: I0128 16:55:18.745549 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb\" (UID: \"2e00aa5f-0d94-48bb-9802-cfff5c46490f\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb"
Jan 28 16:55:18 crc kubenswrapper[4877]: E0128 16:55:18.745758 4877 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 28 16:55:18 crc kubenswrapper[4877]: E0128 16:55:18.746296 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert podName:2e00aa5f-0d94-48bb-9802-cfff5c46490f nodeName:}" failed. No retries permitted until 2026-01-28 16:55:26.74626641 +0000 UTC m=+1230.304593298 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" (UID: "2e00aa5f-0d94-48bb-9802-cfff5c46490f") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 28 16:55:18 crc kubenswrapper[4877]: I0128 16:55:18.950039 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf"
Jan 28 16:55:18 crc kubenswrapper[4877]: I0128 16:55:18.950166 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf"
Jan 28 16:55:18 crc kubenswrapper[4877]: E0128 16:55:18.950841 4877 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 28 16:55:18 crc kubenswrapper[4877]: E0128 16:55:18.950855 4877 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 28 16:55:18 crc kubenswrapper[4877]: E0128 16:55:18.950960 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs podName:8df1e028-d1c7-4b68-b63a-8cc8e762b59d nodeName:}" failed. No retries permitted until 2026-01-28 16:55:26.950932113 +0000 UTC m=+1230.509259001 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs") pod "openstack-operator-controller-manager-659b944486-tdjwf" (UID: "8df1e028-d1c7-4b68-b63a-8cc8e762b59d") : secret "webhook-server-cert" not found
Jan 28 16:55:18 crc kubenswrapper[4877]: E0128 16:55:18.951056 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs podName:8df1e028-d1c7-4b68-b63a-8cc8e762b59d nodeName:}" failed. No retries permitted until 2026-01-28 16:55:26.951029086 +0000 UTC m=+1230.509355984 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs") pod "openstack-operator-controller-manager-659b944486-tdjwf" (UID: "8df1e028-d1c7-4b68-b63a-8cc8e762b59d") : secret "metrics-server-cert" not found
Jan 28 16:55:26 crc kubenswrapper[4877]: I0128 16:55:26.120447 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert\") pod \"infra-operator-controller-manager-694cf4f878-zsqn6\" (UID: \"6fdf0399-314b-40df-96f2-c27008769f71\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6"
Jan 28 16:55:26 crc kubenswrapper[4877]: I0128 16:55:26.127016 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6fdf0399-314b-40df-96f2-c27008769f71-cert\") pod \"infra-operator-controller-manager-694cf4f878-zsqn6\" (UID: \"6fdf0399-314b-40df-96f2-c27008769f71\") " pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6"
Jan 28 16:55:26 crc kubenswrapper[4877]: I0128 16:55:26.227398 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6"
Jan 28 16:55:26 crc kubenswrapper[4877]: I0128 16:55:26.836967 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb\" (UID: \"2e00aa5f-0d94-48bb-9802-cfff5c46490f\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb"
Jan 28 16:55:26 crc kubenswrapper[4877]: I0128 16:55:26.841581 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2e00aa5f-0d94-48bb-9802-cfff5c46490f-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb\" (UID: \"2e00aa5f-0d94-48bb-9802-cfff5c46490f\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb"
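[Annotation] The secret.go / nestedpendingoperations.go pairs above trace one full volume-mount retry cycle. Each failed MountVolume.SetUp schedules the next attempt with a doubled durationBeforeRetry: 4s at 16:55:14, 8s at 16:55:18, and, just below, 16s at 16:55:27; the "m=+1230..." suffix appears to be the same deadline expressed as seconds since the kubelet process started. Backoff is tracked per volume, which is why metrics-certs mounts successfully at 16:55:27 while webhook-certs, still missing, backs off to 16s. The mounts begin succeeding at 16:55:26 once the *-webhook-server-cert secrets exist (the log does not show who created them; the operators' certificate machinery is the likely source), after which sandbox creation proceeds ("No sandbox for pod can be found. Need to start a new one"). A short client-go sketch for checking one of the secrets the kubelet is waiting on; the file name and kubeconfig resolution are assumptions, while the namespace and secret name are taken from the log:

// checksecret.go — illustrative sketch, not part of the kubelet.
// Confirms whether the Secret backing a failing secret volume exists.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a kubeconfig at the conventional home location.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	_, err = client.CoreV1().Secrets("openstack-operators").
		Get(context.TODO(), "infra-operator-webhook-server-cert", metav1.GetOptions{})
	if err != nil {
		fmt.Println("secret not available yet; kubelet keeps backing off:", err)
		return
	}
	fmt.Println("secret exists; the next MountVolume retry should succeed")
}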
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" Jan 28 16:55:27 crc kubenswrapper[4877]: I0128 16:55:27.041097 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:27 crc kubenswrapper[4877]: I0128 16:55:27.041219 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:27 crc kubenswrapper[4877]: E0128 16:55:27.041398 4877 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 16:55:27 crc kubenswrapper[4877]: E0128 16:55:27.041533 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs podName:8df1e028-d1c7-4b68-b63a-8cc8e762b59d nodeName:}" failed. No retries permitted until 2026-01-28 16:55:43.041507759 +0000 UTC m=+1246.599834657 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs") pod "openstack-operator-controller-manager-659b944486-tdjwf" (UID: "8df1e028-d1c7-4b68-b63a-8cc8e762b59d") : secret "webhook-server-cert" not found Jan 28 16:55:27 crc kubenswrapper[4877]: I0128 16:55:27.047305 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-metrics-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" Jan 28 16:55:27 crc kubenswrapper[4877]: E0128 16:55:27.819693 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:ed489f21a0c72557d2da5a271808f19b7c7b85ef32fd9f4aa91bdbfc5bca3bdd" Jan 28 16:55:27 crc kubenswrapper[4877]: E0128 16:55:27.819936 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:ed489f21a0c72557d2da5a271808f19b7c7b85ef32fd9f4aa91bdbfc5bca3bdd,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2d72m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-5f4cd88d46-bdsdg_openstack-operators(cc059705-cab0-43ef-b078-34509b901591): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:55:27 crc kubenswrapper[4877]: E0128 16:55:27.821115 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg" podUID="cc059705-cab0-43ef-b078-34509b901591" Jan 28 16:55:28 crc kubenswrapper[4877]: E0128 16:55:28.741551 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ed489f21a0c72557d2da5a271808f19b7c7b85ef32fd9f4aa91bdbfc5bca3bdd\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg" podUID="cc059705-cab0-43ef-b078-34509b901591" Jan 28 16:55:29 crc kubenswrapper[4877]: E0128 16:55:29.307986 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:9caae9b3ee328df678baa26454e45e47693acdadb27f9c635680597aaec43337" Jan 28 16:55:29 crc kubenswrapper[4877]: E0128 16:55:29.308325 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:9caae9b3ee328df678baa26454e45e47693acdadb27f9c635680597aaec43337,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
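[Annotation] When a container start fails, kuberuntime_manager.go logs the entire resolved Container spec in a single "Unhandled Error" entry, which is why these entries run so long. The spec is readable once the resource.Quantity rendering is decoded: {{500 -3} {} 500m DecimalSI} is the unscaled value 500 at decimal scale -3 (500m, half a CPU), {{536870912 0} {} BinarySI} is 512Mi, and the requests {{10 -3} ...} and {{268435456 0} ...} are 10m CPU and 256Mi. A minimal sketch, using k8s.io/apimachinery, that reproduces the numbers seen in these dumps; illustrative only:

// quantity_sketch.go — decodes the resource notation in the
// &Container{...} dumps above. Illustrative, not kubelet code.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	cpu := resource.MustParse("500m")  // logged as {{500 -3} {} 500m DecimalSI}
	mem := resource.MustParse("512Mi") // logged as {{536870912 0} {} BinarySI}
	fmt.Println(cpu.MilliValue(), "milliCPU") // prints 500
	fmt.Println(mem.Value(), "bytes")         // prints 536870912
}

The printed values match the limits in the dumps above; the same decoding applies to every operator container in this log, since they all share the 500m/512Mi limit and 10m/256Mi request profile.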
Jan 28 16:55:29 crc kubenswrapper[4877]: E0128 16:55:29.308325 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:9caae9b3ee328df678baa26454e45e47693acdadb27f9c635680597aaec43337,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jgwrr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-78fdd796fd-gxvhn_openstack-operators(0e3ad19a-2dfd-4238-9bc5-1f2741b3adc0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 16:55:29 crc kubenswrapper[4877]: E0128 16:55:29.309700 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-gxvhn" podUID="0e3ad19a-2dfd-4238-9bc5-1f2741b3adc0"
Jan 28 16:55:29 crc kubenswrapper[4877]: E0128 16:55:29.749465 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:9caae9b3ee328df678baa26454e45e47693acdadb27f9c635680597aaec43337\\\"\"" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-gxvhn" podUID="0e3ad19a-2dfd-4238-9bc5-1f2741b3adc0"
Jan 28 16:55:31 crc kubenswrapper[4877]: E0128 16:55:31.438986 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:6c88312afa9673f7b72c558368034d7a488ead73080cdcdf581fe85b99263ece"
Jan 28 16:55:31 crc kubenswrapper[4877]: E0128 16:55:31.439572 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:6c88312afa9673f7b72c558368034d7a488ead73080cdcdf581fe85b99263ece,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5p78d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-b45d7bf98-vsnlm_openstack-operators(970f0fe7-15e4-4fcf-bca0-eb07b26ba94a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:55:31 crc kubenswrapper[4877]: E0128 16:55:31.440846 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-vsnlm" podUID="970f0fe7-15e4-4fcf-bca0-eb07b26ba94a" Jan 28 16:55:31 crc kubenswrapper[4877]: E0128 16:55:31.768319 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/designate-operator@sha256:6c88312afa9673f7b72c558368034d7a488ead73080cdcdf581fe85b99263ece\\\"\"" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-vsnlm" podUID="970f0fe7-15e4-4fcf-bca0-eb07b26ba94a" Jan 28 16:55:32 crc kubenswrapper[4877]: E0128 16:55:32.004143 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922" Jan 28 16:55:32 crc kubenswrapper[4877]: E0128 16:55:32.004443 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-64bk2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-547cbdb99f-p88fg_openstack-operators(ddffc31e-38aa-45c6-bad8-5787adf8c7fe): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:55:32 crc kubenswrapper[4877]: E0128 16:55:32.005858 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-p88fg" podUID="ddffc31e-38aa-45c6-bad8-5787adf8c7fe" Jan 28 16:55:32 crc kubenswrapper[4877]: E0128 16:55:32.536414 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:4d55bd6418df3f63f4d3fe47bebf3f5498a520b3e14af98fe16c85ef9fd54d5e" Jan 28 16:55:32 crc kubenswrapper[4877]: E0128 16:55:32.536719 4877 kuberuntime_manager.go:1274] "Unhandled Error" 
err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:4d55bd6418df3f63f4d3fe47bebf3f5498a520b3e14af98fe16c85ef9fd54d5e,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qwrds,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-598f7747c9-z46t2_openstack-operators(434f69b5-0d70-418d-aa5e-04e307a5399c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:55:32 crc kubenswrapper[4877]: E0128 16:55:32.537950 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2" podUID="434f69b5-0d70-418d-aa5e-04e307a5399c" Jan 28 16:55:32 crc kubenswrapper[4877]: E0128 16:55:32.779789 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:4d55bd6418df3f63f4d3fe47bebf3f5498a520b3e14af98fe16c85ef9fd54d5e\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2" podUID="434f69b5-0d70-418d-aa5e-04e307a5399c" Jan 28 16:55:32 crc kubenswrapper[4877]: E0128 16:55:32.780627 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922\\\"\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-p88fg" podUID="ddffc31e-38aa-45c6-bad8-5787adf8c7fe" Jan 28 16:55:33 crc kubenswrapper[4877]: E0128 16:55:33.181060 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492" Jan 28 16:55:33 crc kubenswrapper[4877]: E0128 16:55:33.181800 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ld6np,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-594c8c9d5d-hsh6j_openstack-operators(55abdd00-6b2b-44a2-ae22-bae3fbb12282): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:55:33 crc kubenswrapper[4877]: E0128 16:55:33.183068 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j" podUID="55abdd00-6b2b-44a2-ae22-bae3fbb12282" Jan 28 
16:55:33 crc kubenswrapper[4877]: E0128 16:55:33.733955 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:b916c87806b7eadd83e0ca890c3c24fb990fc5beb48ddc4537e3384efd3e62f7" Jan 28 16:55:33 crc kubenswrapper[4877]: E0128 16:55:33.734229 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:b916c87806b7eadd83e0ca890c3c24fb990fc5beb48ddc4537e3384efd3e62f7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fmhzt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-7478f7dbf9-qrkvb_openstack-operators(e068371c-e59c-4e57-8fd3-a55470f67063): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:55:33 crc kubenswrapper[4877]: E0128 16:55:33.735666 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb" podUID="e068371c-e59c-4e57-8fd3-a55470f67063" Jan 28 16:55:33 crc kubenswrapper[4877]: E0128 16:55:33.794012 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492\\\"\"" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j" podUID="55abdd00-6b2b-44a2-ae22-bae3fbb12282" Jan 28 16:55:33 crc kubenswrapper[4877]: E0128 16:55:33.796466 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:b916c87806b7eadd83e0ca890c3c24fb990fc5beb48ddc4537e3384efd3e62f7\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb" podUID="e068371c-e59c-4e57-8fd3-a55470f67063" Jan 28 16:55:34 crc kubenswrapper[4877]: E0128 16:55:34.298763 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d" Jan 28 16:55:34 crc kubenswrapper[4877]: E0128 16:55:34.299048 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-p6mhf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-69797bbcbd-2zpzm_openstack-operators(f47bead5-fd76-4061-8ca4-51ed7bf2d97d): ErrImagePull: rpc error: code 
= Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:55:34 crc kubenswrapper[4877]: E0128 16:55:34.300213 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm" podUID="f47bead5-fd76-4061-8ca4-51ed7bf2d97d" Jan 28 16:55:34 crc kubenswrapper[4877]: E0128 16:55:34.803892 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d\\\"\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm" podUID="f47bead5-fd76-4061-8ca4-51ed7bf2d97d" Jan 28 16:55:34 crc kubenswrapper[4877]: E0128 16:55:34.897938 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:b673f00227298dcfa89abb46f8296a0825add42da41e8a4bf4dd13367c738d84" Jan 28 16:55:34 crc kubenswrapper[4877]: E0128 16:55:34.898618 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:b673f00227298dcfa89abb46f8296a0825add42da41e8a4bf4dd13367c738d84,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wkdqj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-6b9fb5fdcb-pm47m_openstack-operators(aa23290f-1702-4c63-92c7-047d18922df9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:55:34 crc kubenswrapper[4877]: E0128 16:55:34.899908 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m" podUID="aa23290f-1702-4c63-92c7-047d18922df9" Jan 28 16:55:35 crc kubenswrapper[4877]: E0128 16:55:35.383846 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:8bee4480babd6fd8f686e0ba52a304acb6ffb90f09c7c57e7f5df5f7658836d8" Jan 28 16:55:35 crc kubenswrapper[4877]: E0128 16:55:35.384134 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:8bee4480babd6fd8f686e0ba52a304acb6ffb90f09c7c57e7f5df5f7658836d8,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6zb2t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-78c6999f6f-bbb7n_openstack-operators(2106e351-4841-4ab5-84eb-745af2cb3379): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 16:55:35 crc kubenswrapper[4877]: E0128 16:55:35.385353 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n" podUID="2106e351-4841-4ab5-84eb-745af2cb3379"
Jan 28 16:55:35 crc kubenswrapper[4877]: E0128 16:55:35.475669 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.129.56.18:5001/openstack-k8s-operators/telemetry-operator:a5bcf05e2d71c610156d017fdf197f7c58570d79"
Jan 28 16:55:35 crc kubenswrapper[4877]: E0128 16:55:35.475762 4877 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.129.56.18:5001/openstack-k8s-operators/telemetry-operator:a5bcf05e2d71c610156d017fdf197f7c58570d79"
Jan 28 16:55:35 crc kubenswrapper[4877]: E0128 16:55:35.475983 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.129.56.18:5001/openstack-k8s-operators/telemetry-operator:a5bcf05e2d71c610156d017fdf197f7c58570d79,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xzg5v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-6469dc96d7-np95m_openstack-operators(f083a8cb-517a-40eb-8cb7-6cf7ce24fe7c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 16:55:35 crc kubenswrapper[4877]: E0128 16:55:35.477247 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-6469dc96d7-np95m" podUID="f083a8cb-517a-40eb-8cb7-6cf7ce24fe7c"
Jan 28 16:55:35 crc kubenswrapper[4877]: E0128 16:55:35.812274 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:8bee4480babd6fd8f686e0ba52a304acb6ffb90f09c7c57e7f5df5f7658836d8\\\"\"" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n" podUID="2106e351-4841-4ab5-84eb-745af2cb3379"
Jan 28 16:55:35 crc kubenswrapper[4877]: E0128 16:55:35.812748 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.129.56.18:5001/openstack-k8s-operators/telemetry-operator:a5bcf05e2d71c610156d017fdf197f7c58570d79\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-6469dc96d7-np95m" podUID="f083a8cb-517a-40eb-8cb7-6cf7ce24fe7c"
Jan 28 16:55:35 crc kubenswrapper[4877]: E0128 16:55:35.812855 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:b673f00227298dcfa89abb46f8296a0825add42da41e8a4bf4dd13367c738d84\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m" podUID="aa23290f-1702-4c63-92c7-047d18922df9"
Jan 28 16:55:36 crc kubenswrapper[4877]: E0128 16:55:36.000884 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349"
Jan 28 16:55:36 crc kubenswrapper[4877]: E0128 16:55:36.001318 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xbwjp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-b8b6d4659-88b5k_openstack-operators(337c06ec-4c42-41a1-8faa-60338d4eeddc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 16:55:36 crc kubenswrapper[4877]: E0128 16:55:36.002667 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k" podUID="337c06ec-4c42-41a1-8faa-60338d4eeddc"
Jan 28 16:55:36 crc kubenswrapper[4877]: E0128 16:55:36.826046 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k" podUID="337c06ec-4c42-41a1-8faa-60338d4eeddc"
Jan 28 16:55:39 crc kubenswrapper[4877]: E0128 16:55:39.174703 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:8abfbec47f0119a6c22c61a0ff80a4b1c6c14439a327bc75d4c529c5d8f59658"
Jan 28 16:55:39 crc kubenswrapper[4877]: E0128 16:55:39.175239 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:8abfbec47f0119a6c22c61a0ff80a4b1c6c14439a327bc75d4c529c5d8f59658,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zhgvc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-7bdb645866-qpltb_openstack-operators(4533e492-2631-4c22-af2f-6bec08b23280): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 16:55:39 crc kubenswrapper[4877]: E0128 16:55:39.176802 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb" podUID="4533e492-2631-4c22-af2f-6bec08b23280"
Jan 28 16:55:39 crc kubenswrapper[4877]: I0128 16:55:39.593885 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb"]
Jan 28 16:55:39 crc kubenswrapper[4877]: I0128 16:55:39.854711 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" event={"ID":"2e00aa5f-0d94-48bb-9802-cfff5c46490f","Type":"ContainerStarted","Data":"82174bd55aad4c1366b7bbf7f11c9dc2475ebcb8ff86747762cd63ed0a18269b"}
Jan 28 16:55:39 crc kubenswrapper[4877]: E0128 16:55:39.856101 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:8abfbec47f0119a6c22c61a0ff80a4b1c6c14439a327bc75d4c529c5d8f59658\\\"\"" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb" podUID="4533e492-2631-4c22-af2f-6bec08b23280"
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.138997 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6"]
Jan 28 16:55:40 crc kubenswrapper[4877]: W0128 16:55:40.141532 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6fdf0399_314b_40df_96f2_c27008769f71.slice/crio-43395313b180f41b89c1043fb65bf1a8382f0a49c227091ddb643d76b9481820 WatchSource:0}: Error finding container 43395313b180f41b89c1043fb65bf1a8382f0a49c227091ddb643d76b9481820: Status 404 returned error can't find the container with id 43395313b180f41b89c1043fb65bf1a8382f0a49c227091ddb643d76b9481820
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.864959 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6" event={"ID":"6fdf0399-314b-40df-96f2-c27008769f71","Type":"ContainerStarted","Data":"43395313b180f41b89c1043fb65bf1a8382f0a49c227091ddb643d76b9481820"}
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.866941 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-4xxbg" event={"ID":"f24bb145-227c-43be-b63a-d606c168241b","Type":"ContainerStarted","Data":"63f2ba212f5b4170773d9307417fd483fd5bc20ec7003127d9d97c2c68b346a0"}
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.867167 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-4xxbg"
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.868534 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm" event={"ID":"a18cdf94-0fd1-491c-8213-f2bd11b787e2","Type":"ContainerStarted","Data":"6527add2547867d8938aa224c7b450e940c1c0c538cf0bcde39cc44c080f9925"}
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.868960 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm"
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.870050 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-cl46k" event={"ID":"1b84deae-93ba-48f2-88b2-583025b41dc0","Type":"ContainerStarted","Data":"f8c904ca783fa310d0260afc0e41b451375cdf7de95d2e1b14a237b4e938779b"}
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.870625 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-cl46k"
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.872007 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-vcxlv" event={"ID":"dc576625-1984-4bcf-9c11-8dfbe037d0a1","Type":"ContainerStarted","Data":"81a43c04c528c25e9776f7b4b90a39ef2c45d4cf45549bde16b4a7c58e134d87"}
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.872765 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-vcxlv"
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.875449 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-pvxjd" event={"ID":"c2d80875-c32f-4596-a44d-6a4b9d524304","Type":"ContainerStarted","Data":"7e80aebe298c750a051964cfa2fd1e58e99ab9f0d4124b67df2e40e460b312bf"}
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.876636 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-pvxjd"
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.878108 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-564965969-5ndmd" event={"ID":"c6112d71-edde-4615-9f4d-1c59cf38702d","Type":"ContainerStarted","Data":"c009ff3f7517f3b294d19bd0773894cd35edde422d68704e43acbd6b8a00f707"}
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.878888 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-564965969-5ndmd"
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.880402 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kh2w4" event={"ID":"bf67fad9-2203-4da5-a976-c1b77f627d32","Type":"ContainerStarted","Data":"27b885d2827c576f834682767cb804641d6456e1ef7ded3ca1b4d49317834967"}
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.892143 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-4xxbg" podStartSLOduration=7.789793064 podStartE2EDuration="31.892120327s" podCreationTimestamp="2026-01-28 16:55:09 +0000 UTC" firstStartedPulling="2026-01-28 16:55:11.888046064 +0000 UTC m=+1215.446372952" lastFinishedPulling="2026-01-28 16:55:35.990373327 +0000 UTC m=+1239.548700215" observedRunningTime="2026-01-28 16:55:40.886068502 +0000 UTC m=+1244.444395390" watchObservedRunningTime="2026-01-28 16:55:40.892120327 +0000 UTC m=+1244.450447215"
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.929348 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm" podStartSLOduration=4.596205942 podStartE2EDuration="30.929322812s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:13.404464028 +0000 UTC m=+1216.962790916" lastFinishedPulling="2026-01-28 16:55:39.737580898 +0000 UTC m=+1243.295907786" observedRunningTime="2026-01-28 16:55:40.927579346 +0000 UTC m=+1244.485906234" watchObservedRunningTime="2026-01-28 16:55:40.929322812 +0000 UTC m=+1244.487649700"
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.937149 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-cl46k" podStartSLOduration=7.731555291 podStartE2EDuration="30.937125471s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:12.784805857 +0000 UTC m=+1216.343132745" lastFinishedPulling="2026-01-28 16:55:35.990376037 +0000 UTC m=+1239.548702925" observedRunningTime="2026-01-28 16:55:40.914887151 +0000 UTC m=+1244.473214039" watchObservedRunningTime="2026-01-28 16:55:40.937125471 +0000 UTC m=+1244.495452359"
Jan 28 16:55:40 crc kubenswrapper[4877]: I0128 16:55:40.958610 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-vcxlv" podStartSLOduration=8.371355769000001 podStartE2EDuration="30.958584643s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:13.403331038 +0000 UTC m=+1216.961657936" lastFinishedPulling="2026-01-28 16:55:35.990559922 +0000 UTC m=+1239.548886810" observedRunningTime="2026-01-28 16:55:40.954424686 +0000 UTC m=+1244.512751574" watchObservedRunningTime="2026-01-28 16:55:40.958584643 +0000 UTC m=+1244.516911531"
Jan 28 16:55:41 crc kubenswrapper[4877]: I0128 16:55:41.007425 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-564965969-5ndmd" podStartSLOduration=4.675767493 podStartE2EDuration="31.007393524s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:13.40451038 +0000 UTC m=+1216.962837268" lastFinishedPulling="2026-01-28 16:55:39.736136371 +0000 UTC m=+1243.294463299" observedRunningTime="2026-01-28 16:55:40.989184537 +0000 UTC m=+1244.547511455" watchObservedRunningTime="2026-01-28 16:55:41.007393524 +0000 UTC m=+1244.565720422"
Jan 28 16:55:41 crc kubenswrapper[4877]: I0128 16:55:41.013409 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-pvxjd" podStartSLOduration=6.908697533 podStartE2EDuration="31.013378559s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:11.885778353 +0000 UTC m=+1215.444105241" lastFinishedPulling="2026-01-28 16:55:35.990459379 +0000 UTC m=+1239.548786267" observedRunningTime="2026-01-28 16:55:41.006261056 +0000 UTC m=+1244.564587944" watchObservedRunningTime="2026-01-28 16:55:41.013378559 +0000 UTC m=+1244.571705457"
Jan 28 16:55:41 crc kubenswrapper[4877]: I0128 16:55:41.028712 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-kh2w4" podStartSLOduration=4.635119325 podStartE2EDuration="31.028683452s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:13.412098769 +0000 UTC m=+1216.970425657" lastFinishedPulling="2026-01-28 16:55:39.805662896 +0000 UTC m=+1243.363989784" observedRunningTime="2026-01-28 16:55:41.022629656 +0000 UTC m=+1244.580956544" watchObservedRunningTime="2026-01-28 16:55:41.028683452 +0000 UTC m=+1244.587010350"
Jan 28 16:55:43 crc kubenswrapper[4877]: I0128 16:55:43.101334 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf"
Jan 28 16:55:43 crc kubenswrapper[4877]: I0128 16:55:43.108733 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8df1e028-d1c7-4b68-b63a-8cc8e762b59d-webhook-certs\") pod \"openstack-operator-controller-manager-659b944486-tdjwf\" (UID: \"8df1e028-d1c7-4b68-b63a-8cc8e762b59d\") " pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf"
Jan 28 16:55:43 crc kubenswrapper[4877]: I0128 16:55:43.143051 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf"
Jan 28 16:55:43 crc kubenswrapper[4877]: I0128 16:55:43.633886 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf"]
Jan 28 16:55:43 crc kubenswrapper[4877]: W0128 16:55:43.637962 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8df1e028_d1c7_4b68_b63a_8cc8e762b59d.slice/crio-8fb6a0357666bd80f3337a1436d81cbf1eb33b3d6317513ce817382aa51596e0 WatchSource:0}: Error finding container 8fb6a0357666bd80f3337a1436d81cbf1eb33b3d6317513ce817382aa51596e0: Status 404 returned error can't find the container with id 8fb6a0357666bd80f3337a1436d81cbf1eb33b3d6317513ce817382aa51596e0
Jan 28 16:55:43 crc kubenswrapper[4877]: I0128 16:55:43.922278 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" event={"ID":"8df1e028-d1c7-4b68-b63a-8cc8e762b59d","Type":"ContainerStarted","Data":"8fb6a0357666bd80f3337a1436d81cbf1eb33b3d6317513ce817382aa51596e0"}
Jan 28 16:55:45 crc kubenswrapper[4877]: I0128 16:55:45.941622 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" event={"ID":"8df1e028-d1c7-4b68-b63a-8cc8e762b59d","Type":"ContainerStarted","Data":"3893e368d86f530e79c2f3fe305936953c5dd1fbcab738def5754c07e5bd0358"}
Jan 28 16:55:45 crc kubenswrapper[4877]: I0128 16:55:45.942280 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf"
Jan 28 16:55:45 crc kubenswrapper[4877]: I0128 16:55:45.979711 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" podStartSLOduration=35.979673525 podStartE2EDuration="35.979673525s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:55:45.968612951 +0000 UTC m=+1249.526939839" watchObservedRunningTime="2026-01-28 16:55:45.979673525 +0000 UTC m=+1249.538000413"
Jan 28 16:55:50 crc kubenswrapper[4877]: I0128 16:55:50.353606 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-4xxbg"
Jan 28 16:55:50 crc kubenswrapper[4877]: I0128 16:55:50.488206 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-pvxjd"
Jan 28 16:55:51 crc kubenswrapper[4877]: I0128 16:55:51.001367 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-cl46k"
Jan 28 16:55:51 crc kubenswrapper[4877]: I0128 16:55:51.319833 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-vcxlv"
Jan 28 16:55:51 crc kubenswrapper[4877]: I0128 16:55:51.375777 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm"
Jan 28 16:55:51 crc kubenswrapper[4877]: I0128 16:55:51.536848 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-564965969-5ndmd"
Jan 28 16:55:53 crc kubenswrapper[4877]: I0128 16:55:53.149307 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf"
Jan 28 16:56:00 crc kubenswrapper[4877]: I0128 16:56:00.062862 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg" event={"ID":"cc059705-cab0-43ef-b078-34509b901591","Type":"ContainerStarted","Data":"bea5e44d953270cdae9b32d3a61f23dd8dae15d29a64f966d3837ff6dc3b9429"}
Jan 28 16:56:00 crc kubenswrapper[4877]: I0128 16:56:00.064177 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg"
Jan 28 16:56:00 crc kubenswrapper[4877]: I0128 16:56:00.066637 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-gxvhn" event={"ID":"0e3ad19a-2dfd-4238-9bc5-1f2741b3adc0","Type":"ContainerStarted","Data":"38ca49b6d50681a88f39a9cd2919c8a66c774fb8f336e24487c9cdb416b76d49"}
Jan 28 16:56:00 crc kubenswrapper[4877]: I0128 16:56:00.066945 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-gxvhn"
Jan 28 16:56:00 crc kubenswrapper[4877]: I0128 16:56:00.069260 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-p88fg" event={"ID":"ddffc31e-38aa-45c6-bad8-5787adf8c7fe","Type":"ContainerStarted","Data":"d4cd84a2f1ac088b4c7d37a8f4d260bdff288063d8770f5769160af4c3ffb0f3"}
Jan 28 16:56:00 crc kubenswrapper[4877]: I0128 16:56:00.072001 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2" event={"ID":"434f69b5-0d70-418d-aa5e-04e307a5399c","Type":"ContainerStarted","Data":"f123b4e6572161277ea6ef582cbf684d5ca2e67fab6567625b0608dde5c645ef"}
Jan 28 16:56:00 crc kubenswrapper[4877]: I0128 16:56:00.074388 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k" event={"ID":"337c06ec-4c42-41a1-8faa-60338d4eeddc","Type":"ContainerStarted","Data":"3b54725ce195693ec19232e35a585b644c640621eb13b70d6d4c2e71177ca75a"}
Jan 28 16:56:00 crc kubenswrapper[4877]: I0128 16:56:00.111434 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg" podStartSLOduration=11.661524714 podStartE2EDuration="50.111409747s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:12.827973025 +0000 UTC m=+1216.386299913" lastFinishedPulling="2026-01-28 16:55:51.277858058 +0000 UTC m=+1254.836184946" observedRunningTime="2026-01-28 16:56:00.091088455 +0000 UTC m=+1263.649415363" watchObservedRunningTime="2026-01-28 16:56:00.111409747 +0000 UTC m=+1263.669736635"
Jan 28 16:56:00 crc kubenswrapper[4877]: I0128 16:56:00.114467 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-gxvhn" podStartSLOduration=7.102711125 podStartE2EDuration="50.114460455s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:12.264905666 +0000 UTC m=+1215.823232564" lastFinishedPulling="2026-01-28 16:55:55.276655006 +0000 UTC m=+1258.834981894" observedRunningTime="2026-01-28 16:56:00.109944879 +0000 UTC m=+1263.668271777" watchObservedRunningTime="2026-01-28 16:56:00.114460455 +0000 UTC m=+1263.672787343"
Jan 28 16:56:01 crc kubenswrapper[4877]: I0128 16:56:01.084950 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m" event={"ID":"aa23290f-1702-4c63-92c7-047d18922df9","Type":"ContainerStarted","Data":"395601ce1d225c554f83e3fa8c74ae6d9778c90950476517c8013c73c177ab73"}
Jan 28 16:56:01 crc kubenswrapper[4877]: I0128 16:56:01.087601 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n" event={"ID":"2106e351-4841-4ab5-84eb-745af2cb3379","Type":"ContainerStarted","Data":"1174c3f64bfaa6fef6d4e75b376e0a3fdc513e2a1f446a5ba64cf6635db71cbe"}
Jan 28 16:56:01 crc kubenswrapper[4877]: I0128 16:56:01.089812 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6" event={"ID":"6fdf0399-314b-40df-96f2-c27008769f71","Type":"ContainerStarted","Data":"0373b90df935b858f1ef3f7966863bb822891082c7eaa61339716ff99dbc2a38"}
Jan 28 16:56:01 crc kubenswrapper[4877]: I0128 16:56:01.113158 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2" podStartSLOduration=11.496007103 podStartE2EDuration="51.113127523s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:12.736020942 +0000 UTC m=+1216.294347830" lastFinishedPulling="2026-01-28 16:55:52.353141362 +0000 UTC m=+1255.911468250" observedRunningTime="2026-01-28 16:56:01.110118756 +0000 UTC m=+1264.668445644" watchObservedRunningTime="2026-01-28 16:56:01.113127523 +0000 UTC m=+1264.671454411"
Jan 28 16:56:01 crc kubenswrapper[4877]: I0128 16:56:01.131752 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-p88fg" podStartSLOduration=11.613721911 podStartE2EDuration="51.131720319s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:12.835520364 +0000 UTC m=+1216.393847252" lastFinishedPulling="2026-01-28 16:55:52.353518772 +0000 UTC m=+1255.911845660" observedRunningTime="2026-01-28 16:56:01.127755709 +0000 UTC m=+1264.686082597" watchObservedRunningTime="2026-01-28 16:56:01.131720319 +0000 UTC m=+1264.690047207"
Jan 28 16:56:01 crc kubenswrapper[4877]: I0128 16:56:01.150906 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k" podStartSLOduration=5.173079761 podStartE2EDuration="51.150879092s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:12.83499273 +0000 UTC m=+1216.393319628" lastFinishedPulling="2026-01-28 16:55:58.812792071 +0000 UTC m=+1262.371118959" observedRunningTime="2026-01-28 16:56:01.145747569 +0000 UTC m=+1264.704074487" watchObservedRunningTime="2026-01-28 16:56:01.150879092 +0000 UTC m=+1264.709205980"
Jan 28 16:56:01 crc kubenswrapper[4877]: I0128 16:56:01.175916 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-p88fg"
Jan 28 16:56:02 crc kubenswrapper[4877]: I0128 16:56:02.100411 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb" event={"ID":"4533e492-2631-4c22-af2f-6bec08b23280","Type":"ContainerStarted","Data":"0d92a3c670ad5b68be983d0e260f09922c88319460bb145415cee851a138c953"}
Jan 28 16:56:02 crc kubenswrapper[4877]: I0128 16:56:02.101892 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-vsnlm" event={"ID":"970f0fe7-15e4-4fcf-bca0-eb07b26ba94a","Type":"ContainerStarted","Data":"702ba0f9cf7f6dcd496744181e90d307c6a11cb9ca6536d2a9dd0a79b6e7f49a"}
Jan 28 16:56:02 crc kubenswrapper[4877]: I0128 16:56:02.103421 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb" event={"ID":"e068371c-e59c-4e57-8fd3-a55470f67063","Type":"ContainerStarted","Data":"bdb7f3847259eea4baba8ece4fcbe0b16a6141d1f122f8e061be05d442687433"}
Jan 28 16:56:02 crc kubenswrapper[4877]: I0128 16:56:02.112963 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" event={"ID":"2e00aa5f-0d94-48bb-9802-cfff5c46490f","Type":"ContainerStarted","Data":"6147dd339b16e46f0f3e576dee4baba43b8432ddf1a26175155cafc94a98841a"}
Jan 28 16:56:02 crc kubenswrapper[4877]: I0128 16:56:02.116952 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j" event={"ID":"55abdd00-6b2b-44a2-ae22-bae3fbb12282","Type":"ContainerStarted","Data":"f3fa05922387de2078b0b71fddf5fcaedd13fd0637fcaf3d32dc8d9aa1c25186"}
Jan 28 16:56:02 crc kubenswrapper[4877]: I0128 16:56:02.121881 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm" event={"ID":"f47bead5-fd76-4061-8ca4-51ed7bf2d97d","Type":"ContainerStarted","Data":"30c50881075c0ae4da024c364f0760f43d1731b4207691ccf97bbaa37630df2a"}
Jan 28 16:56:02 crc kubenswrapper[4877]: I0128 16:56:02.123837 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6469dc96d7-np95m" event={"ID":"f083a8cb-517a-40eb-8cb7-6cf7ce24fe7c","Type":"ContainerStarted","Data":"36debaf2b242369f6270e338cd468dd39b11737756f2ddb6dbcf148d9f31799a"}
Jan 28 16:56:02 crc kubenswrapper[4877]: I0128 16:56:02.124286 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m"
Jan 28 16:56:02 crc kubenswrapper[4877]: I0128 16:56:02.157554 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m" podStartSLOduration=6.130042203 podStartE2EDuration="52.157530114s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:12.785133686 +0000 UTC m=+1216.343460574" lastFinishedPulling="2026-01-28 16:55:58.812621597 +0000 UTC m=+1262.370948485" observedRunningTime="2026-01-28 16:56:02.153534742 +0000 UTC m=+1265.711861810" watchObservedRunningTime="2026-01-28 16:56:02.157530114 +0000 UTC m=+1265.715857002"
Jan 28 16:56:02 crc kubenswrapper[4877]: I0128 16:56:02.185524 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n" podStartSLOduration=6.195184674 podStartE2EDuration="52.185487492s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:12.826704341 +0000 UTC m=+1216.385031229" lastFinishedPulling="2026-01-28 16:55:58.817007129 +0000 UTC m=+1262.375334047" observedRunningTime="2026-01-28 16:56:02.177298902 +0000 UTC m=+1265.735625790" watchObservedRunningTime="2026-01-28 16:56:02.185487492 +0000 UTC m=+1265.743814380"
Jan 28 16:56:03 crc kubenswrapper[4877]: I0128 16:56:03.134577 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6"
Jan 28 16:56:03 crc kubenswrapper[4877]: I0128 16:56:03.164369 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6" podStartSLOduration=34.540522723 podStartE2EDuration="53.164349472s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:40.143527626 +0000 UTC m=+1243.701854514" lastFinishedPulling="2026-01-28 16:55:58.767354365 +0000 UTC m=+1262.325681263" observedRunningTime="2026-01-28 16:56:03.163744016 +0000 UTC m=+1266.722070904" watchObservedRunningTime="2026-01-28 16:56:03.164349472 +0000 UTC m=+1266.722676360"
Jan 28 16:56:04 crc kubenswrapper[4877]: I0128 16:56:04.144349 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-6469dc96d7-np95m"
Jan 28 16:56:04 crc kubenswrapper[4877]: I0128 16:56:04.166801 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-6469dc96d7-np95m" podStartSLOduration=8.730789314999999 podStartE2EDuration="54.166779746s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:13.393003826 +0000 UTC m=+1216.951330714" lastFinishedPulling="2026-01-28 16:55:58.828994257 +0000 UTC m=+1262.387321145" observedRunningTime="2026-01-28 16:56:04.162557578 +0000 UTC m=+1267.720884466" watchObservedRunningTime="2026-01-28 16:56:04.166779746 +0000 UTC m=+1267.725106634"
Jan 28 16:56:04 crc kubenswrapper[4877]: I0128 16:56:04.184973 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb" podStartSLOduration=8.195771994 podStartE2EDuration="54.184937212s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:12.827258616 +0000 UTC m=+1216.385585504" lastFinishedPulling="2026-01-28 16:55:58.816423834 +0000 UTC m=+1262.374750722" observedRunningTime="2026-01-28 16:56:04.181876934 +0000 UTC m=+1267.740203822" watchObservedRunningTime="2026-01-28 16:56:04.184937212 +0000 UTC m=+1267.743264130"
Jan 28 16:56:04 crc kubenswrapper[4877]: I0128 16:56:04.212997 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" podStartSLOduration=35.991799136 podStartE2EDuration="54.212977782s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:39.736133321 +0000 UTC m=+1243.294460209" lastFinishedPulling="2026-01-28 16:55:57.957311967 +0000 UTC m=+1261.515638855" observedRunningTime="2026-01-28 16:56:04.209293917 +0000 UTC m=+1267.767620795" watchObservedRunningTime="2026-01-28 16:56:04.212977782 +0000 UTC m=+1267.771304670"
Jan 28 16:56:05 crc kubenswrapper[4877]: I0128 16:56:05.154614 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-vsnlm"
Jan 28 16:56:05 crc kubenswrapper[4877]: I0128 16:56:05.179876 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm" podStartSLOduration=9.767266492 podStartE2EDuration="55.179853834s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:13.404335725 +0000 UTC m=+1216.962662603" lastFinishedPulling="2026-01-28 16:55:58.816923057 +0000 UTC m=+1262.375249945" observedRunningTime="2026-01-28 16:56:05.171981962 +0000 UTC m=+1268.730308850" watchObservedRunningTime="2026-01-28 16:56:05.179853834 +0000 UTC m=+1268.738180722"
Jan 28 16:56:05 crc kubenswrapper[4877]: I0128 16:56:05.197057 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j" podStartSLOduration=9.38634166 podStartE2EDuration="55.197037015s" podCreationTimestamp="2026-01-28 16:55:10 +0000 UTC" firstStartedPulling="2026-01-28 16:55:12.23965426 +0000 UTC m=+1215.797981148" lastFinishedPulling="2026-01-28 16:55:58.050349615 +0000 UTC m=+1261.608676503" observedRunningTime="2026-01-28 16:56:05.196332046 +0000 UTC m=+1268.754658934" watchObservedRunningTime="2026-01-28 16:56:05.197037015 +0000 UTC m=+1268.755363903"
Jan 28 16:56:05 crc kubenswrapper[4877]: I0128 16:56:05.221060 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-vsnlm" podStartSLOduration=9.588491233 podStartE2EDuration="56.22103713s" podCreationTimestamp="2026-01-28 16:55:09 +0000 UTC" firstStartedPulling="2026-01-28 16:55:12.250406234 +0000 UTC m=+1215.808733122" lastFinishedPulling="2026-01-28 16:55:58.882952131 +0000 UTC m=+1262.441279019" observedRunningTime="2026-01-28 16:56:05.216917775 +0000 UTC m=+1268.775244673" watchObservedRunningTime="2026-01-28 16:56:05.22103713 +0000 UTC m=+1268.779364028"
Jan 28 16:56:05 crc kubenswrapper[4877]: I0128 16:56:05.259071 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb" podStartSLOduration=10.173112014 podStartE2EDuration="56.259047536s" podCreationTimestamp="2026-01-28 16:55:09 +0000 UTC" firstStartedPulling="2026-01-28 16:55:11.873267794 +0000 UTC m=+1215.431594672" lastFinishedPulling="2026-01-28 16:55:57.959203306 +0000 UTC m=+1261.517530194" observedRunningTime="2026-01-28 16:56:05.246252267 +0000 UTC m=+1268.804579155" watchObservedRunningTime="2026-01-28 16:56:05.259047536 +0000 UTC m=+1268.817374434"
Jan 28 16:56:06 crc kubenswrapper[4877]: I0128 16:56:06.166889 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-vsnlm"
Jan 28 16:56:06 crc kubenswrapper[4877]: I0128 16:56:06.247199 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6"
Jan 28 16:56:06 crc kubenswrapper[4877]: I0128 16:56:06.939376 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb"
Jan 28 16:56:06 crc kubenswrapper[4877]: I0128 16:56:06.946753 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb"
Jan 28 16:56:07 crc kubenswrapper[4877]: I0128 16:56:07.076824 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 16:56:07 crc kubenswrapper[4877]: I0128 16:56:07.076949 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 16:56:10 crc kubenswrapper[4877]: I0128 16:56:10.327814 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb"
Jan 28 16:56:10 crc kubenswrapper[4877]: I0128 16:56:10.332013 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb"
Jan 28 16:56:10 crc kubenswrapper[4877]: I0128 16:56:10.418651 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-gxvhn"
Jan 28 16:56:10 crc kubenswrapper[4877]: I0128 16:56:10.468086 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j"
Jan 28 16:56:10 crc kubenswrapper[4877]: I0128 16:56:10.472680 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j"
Jan 28 16:56:10 crc kubenswrapper[4877]: I0128 16:56:10.750157 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2"
Jan 28 16:56:10 crc kubenswrapper[4877]: I0128 16:56:10.753075 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2"
Jan 28 16:56:10 crc kubenswrapper[4877]: I0128 16:56:10.846020 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n"
Jan 28 16:56:10 crc kubenswrapper[4877]: I0128 16:56:10.849530 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n"
Jan 28 16:56:10 crc kubenswrapper[4877]: I0128 16:56:10.914395 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k"
Jan 28 16:56:10 crc kubenswrapper[4877]: I0128 16:56:10.916582 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m"
Jan 28 16:56:10 crc kubenswrapper[4877]: I0128 16:56:10.917382 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k"
Jan 28 16:56:10 crc kubenswrapper[4877]: I0128 16:56:10.981651 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb"
Jan 28 16:56:10 crc kubenswrapper[4877]: I0128 16:56:10.986248 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb"
Jan 28 16:56:11 crc kubenswrapper[4877]: I0128 16:56:11.023339 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg"
Jan 28 16:56:11 crc kubenswrapper[4877]: I0128 16:56:11.178463 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-p88fg"
Jan 28 16:56:11 crc kubenswrapper[4877]: I0128 16:56:11.498528 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-6469dc96d7-np95m"
Jan 28 16:56:11 crc kubenswrapper[4877]: I0128 16:56:11.514688 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm"
Jan 28 16:56:11 crc kubenswrapper[4877]: I0128 16:56:11.516448 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.485520 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-2h2hz"]
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.489598 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-2h2hz"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.516220 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.520796 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-pksmp"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.521274 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.521637 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.521861 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-2h2hz"]
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.590451 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14053bcd-4d39-413f-96ac-b485f4cfecfe-config\") pod \"dnsmasq-dns-675f4bcbfc-2h2hz\" (UID: \"14053bcd-4d39-413f-96ac-b485f4cfecfe\") " pod="openstack/dnsmasq-dns-675f4bcbfc-2h2hz"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.590786 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qw7k4\" (UniqueName: \"kubernetes.io/projected/14053bcd-4d39-413f-96ac-b485f4cfecfe-kube-api-access-qw7k4\") pod \"dnsmasq-dns-675f4bcbfc-2h2hz\" (UID: \"14053bcd-4d39-413f-96ac-b485f4cfecfe\") " pod="openstack/dnsmasq-dns-675f4bcbfc-2h2hz"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.606508 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-4xfmb"]
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.610951 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-4xfmb"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.613557 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.618325 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-4xfmb"]
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.692502 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14053bcd-4d39-413f-96ac-b485f4cfecfe-config\") pod \"dnsmasq-dns-675f4bcbfc-2h2hz\" (UID: \"14053bcd-4d39-413f-96ac-b485f4cfecfe\") " pod="openstack/dnsmasq-dns-675f4bcbfc-2h2hz"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.692669 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qw7k4\" (UniqueName: \"kubernetes.io/projected/14053bcd-4d39-413f-96ac-b485f4cfecfe-kube-api-access-qw7k4\") pod \"dnsmasq-dns-675f4bcbfc-2h2hz\" (UID: \"14053bcd-4d39-413f-96ac-b485f4cfecfe\") " pod="openstack/dnsmasq-dns-675f4bcbfc-2h2hz"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.693872 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14053bcd-4d39-413f-96ac-b485f4cfecfe-config\") pod \"dnsmasq-dns-675f4bcbfc-2h2hz\" (UID: \"14053bcd-4d39-413f-96ac-b485f4cfecfe\") " pod="openstack/dnsmasq-dns-675f4bcbfc-2h2hz"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.717412 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qw7k4\" (UniqueName: \"kubernetes.io/projected/14053bcd-4d39-413f-96ac-b485f4cfecfe-kube-api-access-qw7k4\") pod \"dnsmasq-dns-675f4bcbfc-2h2hz\" (UID: \"14053bcd-4d39-413f-96ac-b485f4cfecfe\") " pod="openstack/dnsmasq-dns-675f4bcbfc-2h2hz"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.794570 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgrgt\" (UniqueName: \"kubernetes.io/projected/25758352-6c0a-4bee-b445-048c62593ca2-kube-api-access-zgrgt\") pod \"dnsmasq-dns-78dd6ddcc-4xfmb\" (UID: \"25758352-6c0a-4bee-b445-048c62593ca2\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4xfmb"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.794772 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25758352-6c0a-4bee-b445-048c62593ca2-config\") pod \"dnsmasq-dns-78dd6ddcc-4xfmb\" (UID: \"25758352-6c0a-4bee-b445-048c62593ca2\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4xfmb"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.794842 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25758352-6c0a-4bee-b445-048c62593ca2-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-4xfmb\" (UID: \"25758352-6c0a-4bee-b445-048c62593ca2\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4xfmb"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.817627 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-2h2hz"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.897069 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25758352-6c0a-4bee-b445-048c62593ca2-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-4xfmb\" (UID: \"25758352-6c0a-4bee-b445-048c62593ca2\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4xfmb"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.897566 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgrgt\" (UniqueName: \"kubernetes.io/projected/25758352-6c0a-4bee-b445-048c62593ca2-kube-api-access-zgrgt\") pod \"dnsmasq-dns-78dd6ddcc-4xfmb\" (UID: \"25758352-6c0a-4bee-b445-048c62593ca2\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4xfmb"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.897782 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25758352-6c0a-4bee-b445-048c62593ca2-config\") pod \"dnsmasq-dns-78dd6ddcc-4xfmb\" (UID: \"25758352-6c0a-4bee-b445-048c62593ca2\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4xfmb"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.898185 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25758352-6c0a-4bee-b445-048c62593ca2-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-4xfmb\" (UID: \"25758352-6c0a-4bee-b445-048c62593ca2\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4xfmb"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.898811 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25758352-6c0a-4bee-b445-048c62593ca2-config\") pod \"dnsmasq-dns-78dd6ddcc-4xfmb\" (UID: \"25758352-6c0a-4bee-b445-048c62593ca2\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4xfmb"
Jan 28 16:56:26 crc kubenswrapper[4877]: I0128 16:56:26.953823 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgrgt\" (UniqueName: \"kubernetes.io/projected/25758352-6c0a-4bee-b445-048c62593ca2-kube-api-access-zgrgt\") pod \"dnsmasq-dns-78dd6ddcc-4xfmb\" (UID: \"25758352-6c0a-4bee-b445-048c62593ca2\") " pod="openstack/dnsmasq-dns-78dd6ddcc-4xfmb"
Jan 28 16:56:27 crc kubenswrapper[4877]: I0128 16:56:27.241532 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-4xfmb"
Jan 28 16:56:27 crc kubenswrapper[4877]: I0128 16:56:27.303814 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-2h2hz"]
Jan 28 16:56:27 crc kubenswrapper[4877]: W0128 16:56:27.310653 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod14053bcd_4d39_413f_96ac_b485f4cfecfe.slice/crio-35bd156f85056c0be35ab7c9313febac1c09a1fca1e3fdcaa7663564f4a74bc8 WatchSource:0}: Error finding container 35bd156f85056c0be35ab7c9313febac1c09a1fca1e3fdcaa7663564f4a74bc8: Status 404 returned error can't find the container with id 35bd156f85056c0be35ab7c9313febac1c09a1fca1e3fdcaa7663564f4a74bc8
Jan 28 16:56:27 crc kubenswrapper[4877]: I0128 16:56:27.396170 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-2h2hz" event={"ID":"14053bcd-4d39-413f-96ac-b485f4cfecfe","Type":"ContainerStarted","Data":"35bd156f85056c0be35ab7c9313febac1c09a1fca1e3fdcaa7663564f4a74bc8"}
Jan 28 16:56:27 crc kubenswrapper[4877]: I0128 16:56:27.736359 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-4xfmb"]
Jan 28 16:56:28 crc kubenswrapper[4877]: I0128 16:56:28.409772 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-4xfmb" event={"ID":"25758352-6c0a-4bee-b445-048c62593ca2","Type":"ContainerStarted","Data":"93aa0fd7f8b41aff82aa1e77d050ccc2f559c1f41b899722321f6484453ef709"}
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.265592 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-2h2hz"]
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.303447 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-xlsv5"]
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.305374 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-xlsv5"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.316464 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-xlsv5"]
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.455223 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94736b29-94ba-4356-8b0c-439047f52fdc-config\") pod \"dnsmasq-dns-666b6646f7-xlsv5\" (UID: \"94736b29-94ba-4356-8b0c-439047f52fdc\") " pod="openstack/dnsmasq-dns-666b6646f7-xlsv5"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.455322 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/94736b29-94ba-4356-8b0c-439047f52fdc-dns-svc\") pod \"dnsmasq-dns-666b6646f7-xlsv5\" (UID: \"94736b29-94ba-4356-8b0c-439047f52fdc\") " pod="openstack/dnsmasq-dns-666b6646f7-xlsv5"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.455421 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-256pn\" (UniqueName: \"kubernetes.io/projected/94736b29-94ba-4356-8b0c-439047f52fdc-kube-api-access-256pn\") pod \"dnsmasq-dns-666b6646f7-xlsv5\" (UID: \"94736b29-94ba-4356-8b0c-439047f52fdc\") " pod="openstack/dnsmasq-dns-666b6646f7-xlsv5"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.557066 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94736b29-94ba-4356-8b0c-439047f52fdc-config\") pod \"dnsmasq-dns-666b6646f7-xlsv5\" (UID: \"94736b29-94ba-4356-8b0c-439047f52fdc\") " pod="openstack/dnsmasq-dns-666b6646f7-xlsv5"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.557144 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/94736b29-94ba-4356-8b0c-439047f52fdc-dns-svc\") pod \"dnsmasq-dns-666b6646f7-xlsv5\" (UID: \"94736b29-94ba-4356-8b0c-439047f52fdc\") " pod="openstack/dnsmasq-dns-666b6646f7-xlsv5"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.557210 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-256pn\" (UniqueName: \"kubernetes.io/projected/94736b29-94ba-4356-8b0c-439047f52fdc-kube-api-access-256pn\") pod \"dnsmasq-dns-666b6646f7-xlsv5\" (UID: \"94736b29-94ba-4356-8b0c-439047f52fdc\") " pod="openstack/dnsmasq-dns-666b6646f7-xlsv5"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.559431 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/94736b29-94ba-4356-8b0c-439047f52fdc-dns-svc\") pod \"dnsmasq-dns-666b6646f7-xlsv5\" (UID: \"94736b29-94ba-4356-8b0c-439047f52fdc\") " pod="openstack/dnsmasq-dns-666b6646f7-xlsv5"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.562092 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94736b29-94ba-4356-8b0c-439047f52fdc-config\") pod \"dnsmasq-dns-666b6646f7-xlsv5\" (UID: \"94736b29-94ba-4356-8b0c-439047f52fdc\") " pod="openstack/dnsmasq-dns-666b6646f7-xlsv5"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.594824 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-256pn\" (UniqueName: \"kubernetes.io/projected/94736b29-94ba-4356-8b0c-439047f52fdc-kube-api-access-256pn\") pod \"dnsmasq-dns-666b6646f7-xlsv5\" (UID: \"94736b29-94ba-4356-8b0c-439047f52fdc\") " pod="openstack/dnsmasq-dns-666b6646f7-xlsv5"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.653895 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-xlsv5"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.656657 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-4xfmb"]
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.680612 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qvfkk"]
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.683493 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.703563 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qvfkk"]
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.863302 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znzfc\" (UniqueName: \"kubernetes.io/projected/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-kube-api-access-znzfc\") pod \"dnsmasq-dns-57d769cc4f-qvfkk\" (UID: \"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343\") " pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.863380 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-qvfkk\" (UID: \"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343\") " pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.863452 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-config\") pod \"dnsmasq-dns-57d769cc4f-qvfkk\" (UID: \"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343\") " pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.964983 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znzfc\" (UniqueName: \"kubernetes.io/projected/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-kube-api-access-znzfc\") pod \"dnsmasq-dns-57d769cc4f-qvfkk\" (UID: \"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343\") " pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.965654 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-qvfkk\" (UID: \"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343\") " pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.965708 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-config\") pod \"dnsmasq-dns-57d769cc4f-qvfkk\" (UID: \"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343\") " pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.967308 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-config\") pod \"dnsmasq-dns-57d769cc4f-qvfkk\" (UID: \"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343\") " pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk"
Jan 28 16:56:29 crc kubenswrapper[4877]: I0128 16:56:29.967389 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-qvfkk\" (UID: \"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343\") " pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk"
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:29.998091 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znzfc\" (UniqueName: \"kubernetes.io/projected/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-kube-api-access-znzfc\") pod \"dnsmasq-dns-57d769cc4f-qvfkk\" (UID: \"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343\") " pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk"
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.076062 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk"
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.438343 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.440868 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.445714 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.445951 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.446156 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.446332 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.446495 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.451140 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-fc87d"
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.454985 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.455808 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.485305 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-2"]
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.487858 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2"
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.502114 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-1"]
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.504411 4877 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.514814 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.554259 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.564876 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-xlsv5"] Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.588814 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-pod-info\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.588962 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.588990 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/951f6a86-2dbc-402b-bb10-9a16d347c697-pod-info\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.589022 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-config-data\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.589043 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d96b5016-3ed4-4f98-8708-f69092894981-pod-info\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.589063 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-server-conf\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.589092 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.589115 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-35962983-c2f9-469f-a143-b7309d73e288\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35962983-c2f9-469f-a143-b7309d73e288\") pod \"rabbitmq-server-1\" (UID: 
\"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.589137 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.589160 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.589185 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wbrp\" (UniqueName: \"kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-kube-api-access-7wbrp\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.589222 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djqhq\" (UniqueName: \"kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-kube-api-access-djqhq\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.589249 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.589272 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d96b5016-3ed4-4f98-8708-f69092894981-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.589298 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.589322 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.589347 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.589684 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.589763 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.591102 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/951f6a86-2dbc-402b-bb10-9a16d347c697-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.591179 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.591203 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.591241 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.591312 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbpmr\" (UniqueName: \"kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-kube-api-access-bbpmr\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.591344 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-server-conf\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.591411 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.591698 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-config-data\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.591812 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.591852 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.591961 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.591981 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-server-conf\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.592048 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-config-data\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.592079 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.691536 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qvfkk"] Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.694863 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.694903 4877 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.694927 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.694952 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbpmr\" (UniqueName: \"kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-kube-api-access-bbpmr\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.694972 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-server-conf\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.694994 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695022 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-config-data\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695057 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695081 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695120 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695134 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-server-conf\") pod \"rabbitmq-server-1\" (UID: 
\"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695160 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-config-data\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695179 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695195 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-pod-info\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695220 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695250 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/951f6a86-2dbc-402b-bb10-9a16d347c697-pod-info\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695280 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-config-data\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695302 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d96b5016-3ed4-4f98-8708-f69092894981-pod-info\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695321 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-server-conf\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695355 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695382 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-35962983-c2f9-469f-a143-b7309d73e288\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35962983-c2f9-469f-a143-b7309d73e288\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695410 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695432 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695463 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wbrp\" (UniqueName: \"kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-kube-api-access-7wbrp\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695511 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djqhq\" (UniqueName: \"kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-kube-api-access-djqhq\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695534 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695550 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d96b5016-3ed4-4f98-8708-f69092894981-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695580 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695601 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695625 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\") pod \"rabbitmq-server-0\" (UID: 
\"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695642 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695670 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.695689 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/951f6a86-2dbc-402b-bb10-9a16d347c697-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.697650 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-server-conf\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.697976 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.699006 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-config-data\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.699026 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.699891 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.707004 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-server-conf\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.707961 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.710714 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.710829 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/951f6a86-2dbc-402b-bb10-9a16d347c697-pod-info\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.711411 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-config-data\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.713356 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.713625 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.713746 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-server-conf\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.714076 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.714652 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.714665 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.714720 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.714775 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-config-data\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.718997 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.719595 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.719657 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-35962983-c2f9-469f-a143-b7309d73e288\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35962983-c2f9-469f-a143-b7309d73e288\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9fcbc32b1e89d88c9c45f66191d0bf2f4114e554cfde863b999975859aec8c96/globalmount\"" pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.719705 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.719798 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/445da7c8eee792eca514a7f00c533dabbf59427d0648942612a95e6cb358ee0f/globalmount\"" pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.720189 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.727999 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.728075 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/15ce35ef8412648b43e30449966151966004a81cada05d357841689a3aa2f9ec/globalmount\"" pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.732104 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.733789 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wbrp\" (UniqueName: \"kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-kube-api-access-7wbrp\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.735504 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.738521 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/951f6a86-2dbc-402b-bb10-9a16d347c697-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.738592 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djqhq\" (UniqueName: \"kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-kube-api-access-djqhq\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.741340 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d96b5016-3ed4-4f98-8708-f69092894981-pod-info\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.741396 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-pod-info\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.743547 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.744295 4877 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d96b5016-3ed4-4f98-8708-f69092894981-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.744605 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbpmr\" (UniqueName: \"kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-kube-api-access-bbpmr\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.778453 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-35962983-c2f9-469f-a143-b7309d73e288\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35962983-c2f9-469f-a143-b7309d73e288\") pod \"rabbitmq-server-1\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.789103 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\") pod \"rabbitmq-server-0\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " pod="openstack/rabbitmq-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.800129 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\") pod \"rabbitmq-server-2\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.818729 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.832794 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.849346 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.874723 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.878497 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.892841 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.892943 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.892841 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.893279 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.893426 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.893633 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-j77zw" Jan 28 16:56:30 crc kubenswrapper[4877]: I0128 16:56:30.895535 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.010933 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5d261b3a-c6f9-48bd-92de-b76d3821e778-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.011022 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.011079 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.011137 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.011162 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.011189 4877 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5d261b3a-c6f9-48bd-92de-b76d3821e778-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.011241 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.011264 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.011309 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkp5p\" (UniqueName: \"kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-kube-api-access-pkp5p\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.011354 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.011400 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.085365 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.116008 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.116091 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.116178 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5d261b3a-c6f9-48bd-92de-b76d3821e778-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.116220 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.116244 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.116277 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.116313 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.116340 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5d261b3a-c6f9-48bd-92de-b76d3821e778-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.116361 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 
16:56:31.116382 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.116406 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkp5p\" (UniqueName: \"kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-kube-api-access-pkp5p\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.118071 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.118392 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.118678 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.119614 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.122848 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5d261b3a-c6f9-48bd-92de-b76d3821e778-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.125135 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.128967 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.129170 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.130007 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5d261b3a-c6f9-48bd-92de-b76d3821e778-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.132530 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.132563 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/dd13eadb591ae01e691b5638b2fec0ba44b9edb8b2da41d7fd4b1906fea1a0e1/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.149769 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkp5p\" (UniqueName: \"kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-kube-api-access-pkp5p\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.213001 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\") pod \"rabbitmq-cell1-server-0\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.272561 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.500327 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-xlsv5" event={"ID":"94736b29-94ba-4356-8b0c-439047f52fdc","Type":"ContainerStarted","Data":"b451836f4aae9a838953d3edc482eb77013e7f3d5c2e4152718e23680e4ad5c3"} Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.504792 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk" event={"ID":"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343","Type":"ContainerStarted","Data":"6671c185e5d92216f56323ffd65b7c95aa6ee6da353d04bc160923dbb5e5f565"} Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.545399 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.582869 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"] Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.842123 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 16:56:31 crc kubenswrapper[4877]: I0128 16:56:31.862402 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 16:56:31 crc kubenswrapper[4877]: W0128 16:56:31.889416 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5d261b3a_c6f9_48bd_92de_b76d3821e778.slice/crio-ac6dc4628309769ce3a3015e4c19f1e05fba9f79f1d39100372e0e3baa85312c WatchSource:0}: Error finding container ac6dc4628309769ce3a3015e4c19f1e05fba9f79f1d39100372e0e3baa85312c: Status 404 returned error can't find the container with id ac6dc4628309769ce3a3015e4c19f1e05fba9f79f1d39100372e0e3baa85312c Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.002611 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.005756 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.009318 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.009359 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.009353 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-z5cng" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.009435 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.015811 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.037155 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.042695 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6cf870d4-330c-490b-8fcc-77028d084de4-kolla-config\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.042777 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cf870d4-330c-490b-8fcc-77028d084de4-operator-scripts\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.042825 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/6cf870d4-330c-490b-8fcc-77028d084de4-config-data-generated\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.042855 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cf870d4-330c-490b-8fcc-77028d084de4-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.042937 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6m7t\" (UniqueName: \"kubernetes.io/projected/6cf870d4-330c-490b-8fcc-77028d084de4-kube-api-access-r6m7t\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.042982 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cf870d4-330c-490b-8fcc-77028d084de4-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.043012 4877 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/6cf870d4-330c-490b-8fcc-77028d084de4-config-data-default\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.043060 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-477c6c8b-d66b-4d95-b87d-1a3b630e87a5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-477c6c8b-d66b-4d95-b87d-1a3b630e87a5\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.143815 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cf870d4-330c-490b-8fcc-77028d084de4-operator-scripts\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.144613 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/6cf870d4-330c-490b-8fcc-77028d084de4-config-data-generated\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.144708 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/6cf870d4-330c-490b-8fcc-77028d084de4-config-data-generated\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.144819 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cf870d4-330c-490b-8fcc-77028d084de4-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.145549 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cf870d4-330c-490b-8fcc-77028d084de4-operator-scripts\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.145236 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6m7t\" (UniqueName: \"kubernetes.io/projected/6cf870d4-330c-490b-8fcc-77028d084de4-kube-api-access-r6m7t\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.145793 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cf870d4-330c-490b-8fcc-77028d084de4-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.146346 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: 
\"kubernetes.io/configmap/6cf870d4-330c-490b-8fcc-77028d084de4-config-data-default\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.146459 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-477c6c8b-d66b-4d95-b87d-1a3b630e87a5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-477c6c8b-d66b-4d95-b87d-1a3b630e87a5\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.146588 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6cf870d4-330c-490b-8fcc-77028d084de4-kolla-config\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.147138 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/6cf870d4-330c-490b-8fcc-77028d084de4-config-data-default\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.147471 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6cf870d4-330c-490b-8fcc-77028d084de4-kolla-config\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.154290 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.154322 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-477c6c8b-d66b-4d95-b87d-1a3b630e87a5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-477c6c8b-d66b-4d95-b87d-1a3b630e87a5\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/cec11c9fc9633e953d736ade34baca2be3f23912ed5afe11d4c67fe6639d4613/globalmount\"" pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.154324 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cf870d4-330c-490b-8fcc-77028d084de4-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.154410 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cf870d4-330c-490b-8fcc-77028d084de4-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.165454 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6m7t\" (UniqueName: \"kubernetes.io/projected/6cf870d4-330c-490b-8fcc-77028d084de4-kube-api-access-r6m7t\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.216328 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-477c6c8b-d66b-4d95-b87d-1a3b630e87a5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-477c6c8b-d66b-4d95-b87d-1a3b630e87a5\") pod \"openstack-galera-0\" (UID: \"6cf870d4-330c-490b-8fcc-77028d084de4\") " pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.339202 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.519349 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"d96b5016-3ed4-4f98-8708-f69092894981","Type":"ContainerStarted","Data":"67365e2f49d6e215b009152f3e45420221902eb62d9b05c5f8cb5dc80ce1d0bd"} Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.520214 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"2f642a61-430e-4dfc-b6b6-3ee68161eaf6","Type":"ContainerStarted","Data":"fc92e019da570fb319c5d8e9f56baa7afaa013a1f0778ac7d11e9ad9595d8042"} Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.523395 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"951f6a86-2dbc-402b-bb10-9a16d347c697","Type":"ContainerStarted","Data":"24ce48cff87d13c1dcf823f413b0ce90d32a1ae7cf3e57ec845c45bc1c4a8f25"} Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.524805 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5d261b3a-c6f9-48bd-92de-b76d3821e778","Type":"ContainerStarted","Data":"ac6dc4628309769ce3a3015e4c19f1e05fba9f79f1d39100372e0e3baa85312c"} Jan 28 16:56:32 crc kubenswrapper[4877]: I0128 16:56:32.914130 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 28 16:56:32 crc kubenswrapper[4877]: W0128 16:56:32.934780 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cf870d4_330c_490b_8fcc_77028d084de4.slice/crio-3a55bb0b8c8a6fcdf32e5538f2c55b0af98bfbcb4f55bc7239de0e6db1a02010 WatchSource:0}: Error finding container 3a55bb0b8c8a6fcdf32e5538f2c55b0af98bfbcb4f55bc7239de0e6db1a02010: Status 404 returned error can't find the container with id 3a55bb0b8c8a6fcdf32e5538f2c55b0af98bfbcb4f55bc7239de0e6db1a02010 Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.212060 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.213815 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.219666 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.219994 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.220535 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.220727 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-ng79p" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.233239 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.289952 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.290034 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.290061 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.290095 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.290222 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.290287 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-18f8f43a-ca5b-4648-93c4-5a03cffc5ff1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-18f8f43a-ca5b-4648-93c4-5a03cffc5ff1\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.290317 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"kube-api-access-g42kv\" (UniqueName: \"kubernetes.io/projected/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-kube-api-access-g42kv\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.290353 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.391875 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g42kv\" (UniqueName: \"kubernetes.io/projected/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-kube-api-access-g42kv\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.392670 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.392797 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.392866 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.392891 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.392946 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.393094 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.393195 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-18f8f43a-ca5b-4648-93c4-5a03cffc5ff1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-18f8f43a-ca5b-4648-93c4-5a03cffc5ff1\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.394046 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.394607 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.396913 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.397535 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.397850 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.397892 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-18f8f43a-ca5b-4648-93c4-5a03cffc5ff1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-18f8f43a-ca5b-4648-93c4-5a03cffc5ff1\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0c91848496082687a2e59628a0706faee82ea5c860ca9038fed28ddd38dc5d91/globalmount\"" pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.403590 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.405898 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.431008 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g42kv\" (UniqueName: \"kubernetes.io/projected/5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c-kube-api-access-g42kv\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.465795 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-18f8f43a-ca5b-4648-93c4-5a03cffc5ff1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-18f8f43a-ca5b-4648-93c4-5a03cffc5ff1\") pod \"openstack-cell1-galera-0\" (UID: \"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c\") " pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.546098 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.561118 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"6cf870d4-330c-490b-8fcc-77028d084de4","Type":"ContainerStarted","Data":"3a55bb0b8c8a6fcdf32e5538f2c55b0af98bfbcb4f55bc7239de0e6db1a02010"} Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.578624 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.584966 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.606458 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f8ef99d3-7153-4bd3-bc86-fe13ad891084-config-data\") pod \"memcached-0\" (UID: \"f8ef99d3-7153-4bd3-bc86-fe13ad891084\") " pod="openstack/memcached-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.606661 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zb7bz\" (UniqueName: \"kubernetes.io/projected/f8ef99d3-7153-4bd3-bc86-fe13ad891084-kube-api-access-zb7bz\") pod \"memcached-0\" (UID: \"f8ef99d3-7153-4bd3-bc86-fe13ad891084\") " pod="openstack/memcached-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.606748 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8ef99d3-7153-4bd3-bc86-fe13ad891084-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f8ef99d3-7153-4bd3-bc86-fe13ad891084\") " pod="openstack/memcached-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.606774 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8ef99d3-7153-4bd3-bc86-fe13ad891084-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f8ef99d3-7153-4bd3-bc86-fe13ad891084\") " pod="openstack/memcached-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.607184 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.607516 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f8ef99d3-7153-4bd3-bc86-fe13ad891084-kolla-config\") pod \"memcached-0\" (UID: \"f8ef99d3-7153-4bd3-bc86-fe13ad891084\") " pod="openstack/memcached-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.618852 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.619040 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-qjkvx" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.620067 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.712017 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f8ef99d3-7153-4bd3-bc86-fe13ad891084-kolla-config\") pod \"memcached-0\" (UID: \"f8ef99d3-7153-4bd3-bc86-fe13ad891084\") " pod="openstack/memcached-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.712130 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f8ef99d3-7153-4bd3-bc86-fe13ad891084-config-data\") pod \"memcached-0\" (UID: \"f8ef99d3-7153-4bd3-bc86-fe13ad891084\") " pod="openstack/memcached-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.712168 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zb7bz\" (UniqueName: 
\"kubernetes.io/projected/f8ef99d3-7153-4bd3-bc86-fe13ad891084-kube-api-access-zb7bz\") pod \"memcached-0\" (UID: \"f8ef99d3-7153-4bd3-bc86-fe13ad891084\") " pod="openstack/memcached-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.712213 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8ef99d3-7153-4bd3-bc86-fe13ad891084-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f8ef99d3-7153-4bd3-bc86-fe13ad891084\") " pod="openstack/memcached-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.712239 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8ef99d3-7153-4bd3-bc86-fe13ad891084-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f8ef99d3-7153-4bd3-bc86-fe13ad891084\") " pod="openstack/memcached-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.716754 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f8ef99d3-7153-4bd3-bc86-fe13ad891084-config-data\") pod \"memcached-0\" (UID: \"f8ef99d3-7153-4bd3-bc86-fe13ad891084\") " pod="openstack/memcached-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.717073 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f8ef99d3-7153-4bd3-bc86-fe13ad891084-kolla-config\") pod \"memcached-0\" (UID: \"f8ef99d3-7153-4bd3-bc86-fe13ad891084\") " pod="openstack/memcached-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.720001 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8ef99d3-7153-4bd3-bc86-fe13ad891084-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f8ef99d3-7153-4bd3-bc86-fe13ad891084\") " pod="openstack/memcached-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.728978 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8ef99d3-7153-4bd3-bc86-fe13ad891084-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f8ef99d3-7153-4bd3-bc86-fe13ad891084\") " pod="openstack/memcached-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.759979 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zb7bz\" (UniqueName: \"kubernetes.io/projected/f8ef99d3-7153-4bd3-bc86-fe13ad891084-kube-api-access-zb7bz\") pod \"memcached-0\" (UID: \"f8ef99d3-7153-4bd3-bc86-fe13ad891084\") " pod="openstack/memcached-0" Jan 28 16:56:33 crc kubenswrapper[4877]: I0128 16:56:33.944430 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 28 16:56:34 crc kubenswrapper[4877]: I0128 16:56:34.298452 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 28 16:56:34 crc kubenswrapper[4877]: W0128 16:56:34.300332 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5e8b9412_b4c3_411a_9b83_fe67dfe2cc1c.slice/crio-1f790c2f3c94dfe5c0ae6f025d918e05f8887f40c529330c7f6dad5c7e7f46e4 WatchSource:0}: Error finding container 1f790c2f3c94dfe5c0ae6f025d918e05f8887f40c529330c7f6dad5c7e7f46e4: Status 404 returned error can't find the container with id 1f790c2f3c94dfe5c0ae6f025d918e05f8887f40c529330c7f6dad5c7e7f46e4 Jan 28 16:56:34 crc kubenswrapper[4877]: I0128 16:56:34.550060 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 28 16:56:34 crc kubenswrapper[4877]: W0128 16:56:34.560374 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8ef99d3_7153_4bd3_bc86_fe13ad891084.slice/crio-199de35266eab7bcd9ae561ca73ea9a66248f806e08e8df428f484b940e1ce7b WatchSource:0}: Error finding container 199de35266eab7bcd9ae561ca73ea9a66248f806e08e8df428f484b940e1ce7b: Status 404 returned error can't find the container with id 199de35266eab7bcd9ae561ca73ea9a66248f806e08e8df428f484b940e1ce7b Jan 28 16:56:34 crc kubenswrapper[4877]: I0128 16:56:34.581685 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"f8ef99d3-7153-4bd3-bc86-fe13ad891084","Type":"ContainerStarted","Data":"199de35266eab7bcd9ae561ca73ea9a66248f806e08e8df428f484b940e1ce7b"} Jan 28 16:56:34 crc kubenswrapper[4877]: I0128 16:56:34.586277 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c","Type":"ContainerStarted","Data":"1f790c2f3c94dfe5c0ae6f025d918e05f8887f40c529330c7f6dad5c7e7f46e4"} Jan 28 16:56:35 crc kubenswrapper[4877]: I0128 16:56:35.880445 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 16:56:35 crc kubenswrapper[4877]: I0128 16:56:35.882708 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 16:56:35 crc kubenswrapper[4877]: I0128 16:56:35.886091 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-xd5xb" Jan 28 16:56:35 crc kubenswrapper[4877]: I0128 16:56:35.895815 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 16:56:35 crc kubenswrapper[4877]: I0128 16:56:35.988788 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48vgj\" (UniqueName: \"kubernetes.io/projected/08b23417-7d7c-4d16-85e0-4f06c5e9b314-kube-api-access-48vgj\") pod \"kube-state-metrics-0\" (UID: \"08b23417-7d7c-4d16-85e0-4f06c5e9b314\") " pod="openstack/kube-state-metrics-0" Jan 28 16:56:36 crc kubenswrapper[4877]: I0128 16:56:36.091694 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48vgj\" (UniqueName: \"kubernetes.io/projected/08b23417-7d7c-4d16-85e0-4f06c5e9b314-kube-api-access-48vgj\") pod \"kube-state-metrics-0\" (UID: \"08b23417-7d7c-4d16-85e0-4f06c5e9b314\") " pod="openstack/kube-state-metrics-0" Jan 28 16:56:36 crc kubenswrapper[4877]: I0128 16:56:36.135964 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48vgj\" (UniqueName: \"kubernetes.io/projected/08b23417-7d7c-4d16-85e0-4f06c5e9b314-kube-api-access-48vgj\") pod \"kube-state-metrics-0\" (UID: \"08b23417-7d7c-4d16-85e0-4f06c5e9b314\") " pod="openstack/kube-state-metrics-0" Jan 28 16:56:36 crc kubenswrapper[4877]: I0128 16:56:36.227051 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.006835 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-lwb9m"] Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.010178 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-lwb9m" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.017073 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-wbdw9" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.017330 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.061809 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-lwb9m"] Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.093919 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.094007 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.162379 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/38ca3e30-9962-4a70-8ca0-c96691111265-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-lwb9m\" (UID: \"38ca3e30-9962-4a70-8ca0-c96691111265\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-lwb9m" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.167869 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rx297\" (UniqueName: \"kubernetes.io/projected/38ca3e30-9962-4a70-8ca0-c96691111265-kube-api-access-rx297\") pod \"observability-ui-dashboards-66cbf594b5-lwb9m\" (UID: \"38ca3e30-9962-4a70-8ca0-c96691111265\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-lwb9m" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.270140 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rx297\" (UniqueName: \"kubernetes.io/projected/38ca3e30-9962-4a70-8ca0-c96691111265-kube-api-access-rx297\") pod \"observability-ui-dashboards-66cbf594b5-lwb9m\" (UID: \"38ca3e30-9962-4a70-8ca0-c96691111265\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-lwb9m" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.270245 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/38ca3e30-9962-4a70-8ca0-c96691111265-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-lwb9m\" (UID: \"38ca3e30-9962-4a70-8ca0-c96691111265\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-lwb9m" Jan 28 16:56:37 crc kubenswrapper[4877]: E0128 16:56:37.270673 4877 secret.go:188] Couldn't get secret openshift-operators/observability-ui-dashboards: secret "observability-ui-dashboards" not found Jan 28 16:56:37 crc kubenswrapper[4877]: E0128 16:56:37.270777 4877 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/38ca3e30-9962-4a70-8ca0-c96691111265-serving-cert podName:38ca3e30-9962-4a70-8ca0-c96691111265 nodeName:}" failed. No retries permitted until 2026-01-28 16:56:37.77074966 +0000 UTC m=+1301.329076548 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/38ca3e30-9962-4a70-8ca0-c96691111265-serving-cert") pod "observability-ui-dashboards-66cbf594b5-lwb9m" (UID: "38ca3e30-9962-4a70-8ca0-c96691111265") : secret "observability-ui-dashboards" not found Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.314081 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rx297\" (UniqueName: \"kubernetes.io/projected/38ca3e30-9962-4a70-8ca0-c96691111265-kube-api-access-rx297\") pod \"observability-ui-dashboards-66cbf594b5-lwb9m\" (UID: \"38ca3e30-9962-4a70-8ca0-c96691111265\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-lwb9m" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.476834 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.483660 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.497345 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.499363 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.499676 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.499913 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-k2v98" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.500044 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.500358 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.501528 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.515550 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.575555 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6c679b788d-zft7r"] Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.577337 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.579423 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.596718 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.579473 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.598546 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.598703 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/36256bdd-5ada-4651-8944-ed4c8978ed2c-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.598770 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.598795 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.598820 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-config\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.598853 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9g9ck\" (UniqueName: \"kubernetes.io/projected/36256bdd-5ada-4651-8944-ed4c8978ed2c-kube-api-access-9g9ck\") pod \"prometheus-metric-storage-0\" (UID: 
\"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.598964 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/36256bdd-5ada-4651-8944-ed4c8978ed2c-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.599059 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.611575 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6c679b788d-zft7r"] Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.702682 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.705029 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.705123 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-config\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.705180 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9g9ck\" (UniqueName: \"kubernetes.io/projected/36256bdd-5ada-4651-8944-ed4c8978ed2c-kube-api-access-9g9ck\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.706646 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-console-oauth-config\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.706771 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/36256bdd-5ada-4651-8944-ed4c8978ed2c-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc 
kubenswrapper[4877]: I0128 16:56:37.706898 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-service-ca\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.706935 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.707145 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.707193 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-console-config\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.707261 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.707287 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-trusted-ca-bundle\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.707342 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-console-serving-cert\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.707452 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.707581 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nfnh\" (UniqueName: 
\"kubernetes.io/projected/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-kube-api-access-9nfnh\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.707610 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-oauth-serving-cert\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.707691 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/36256bdd-5ada-4651-8944-ed4c8978ed2c-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.715360 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-config\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.716159 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.716534 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.718465 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/36256bdd-5ada-4651-8944-ed4c8978ed2c-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.719735 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.720934 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/36256bdd-5ada-4651-8944-ed4c8978ed2c-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.725935 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" 
(UniqueName: \"kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.733979 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.734035 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e56a4798f43fc8e09f4b73faaf38f0237452e71872991f2da2d2463cbf63cff6/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.745612 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.787202 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9g9ck\" (UniqueName: \"kubernetes.io/projected/36256bdd-5ada-4651-8944-ed4c8978ed2c-kube-api-access-9g9ck\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.810453 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nfnh\" (UniqueName: \"kubernetes.io/projected/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-kube-api-access-9nfnh\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.810514 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-oauth-serving-cert\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.810627 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-console-oauth-config\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.810691 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-service-ca\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.810814 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"console-config\" (UniqueName: \"kubernetes.io/configmap/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-console-config\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.810858 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-trusted-ca-bundle\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.811187 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-console-serving-cert\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.811371 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/38ca3e30-9962-4a70-8ca0-c96691111265-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-lwb9m\" (UID: \"38ca3e30-9962-4a70-8ca0-c96691111265\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-lwb9m" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.816797 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-service-ca\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.819079 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-oauth-serving-cert\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.822791 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/38ca3e30-9962-4a70-8ca0-c96691111265-serving-cert\") pod \"observability-ui-dashboards-66cbf594b5-lwb9m\" (UID: \"38ca3e30-9962-4a70-8ca0-c96691111265\") " pod="openshift-operators/observability-ui-dashboards-66cbf594b5-lwb9m" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.823438 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-console-config\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.824402 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-trusted-ca-bundle\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.831579 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/kube-state-metrics-0"] Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.835109 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-console-oauth-config\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.843900 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-console-serving-cert\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.862063 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nfnh\" (UniqueName: \"kubernetes.io/projected/1a65e44f-17f2-42b4-80f9-5f4f06368d7d-kube-api-access-9nfnh\") pod \"console-6c679b788d-zft7r\" (UID: \"1a65e44f-17f2-42b4-80f9-5f4f06368d7d\") " pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.953111 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\") pod \"prometheus-metric-storage-0\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") " pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.996832 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-lwb9m" Jan 28 16:56:37 crc kubenswrapper[4877]: I0128 16:56:37.998762 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:38 crc kubenswrapper[4877]: I0128 16:56:38.183913 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:38.740785 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"08b23417-7d7c-4d16-85e0-4f06c5e9b314","Type":"ContainerStarted","Data":"a6dcb1c1e2325f9da6e7f731e5e8640b86799599cdb46640a3b9effa5b4e72bd"} Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.214190 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-66cbf594b5-lwb9m"] Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.374765 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 16:56:39 crc kubenswrapper[4877]: W0128 16:56:39.435508 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod36256bdd_5ada_4651_8944_ed4c8978ed2c.slice/crio-4b2ecfd565282078feb900f4608502714c9c7ea947b3bf94111217c0b19823e1 WatchSource:0}: Error finding container 4b2ecfd565282078feb900f4608502714c9c7ea947b3bf94111217c0b19823e1: Status 404 returned error can't find the container with id 4b2ecfd565282078feb900f4608502714c9c7ea947b3bf94111217c0b19823e1 Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.476134 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.478329 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.486443 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.488252 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.488520 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-dxmfx" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.488616 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.495705 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.499862 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.567796 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6c679b788d-zft7r"] Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.570800 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-36fa4e8d-efd0-4c28-af6d-a0cbca208f62\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-36fa4e8d-efd0-4c28-af6d-a0cbca208f62\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.570852 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48287337-b9e0-4ad0-8db3-00cf201ca4cf-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " 
pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.570908 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48287337-b9e0-4ad0-8db3-00cf201ca4cf-config\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.570940 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7j95m\" (UniqueName: \"kubernetes.io/projected/48287337-b9e0-4ad0-8db3-00cf201ca4cf-kube-api-access-7j95m\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.570994 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/48287337-b9e0-4ad0-8db3-00cf201ca4cf-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.571055 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/48287337-b9e0-4ad0-8db3-00cf201ca4cf-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.571090 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/48287337-b9e0-4ad0-8db3-00cf201ca4cf-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.571108 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/48287337-b9e0-4ad0-8db3-00cf201ca4cf-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: W0128 16:56:39.641830 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a65e44f_17f2_42b4_80f9_5f4f06368d7d.slice/crio-d63b2130d1c54016aea99ca89eb51d5aca7e5f9689f0cc5917ace8206f3ac693 WatchSource:0}: Error finding container d63b2130d1c54016aea99ca89eb51d5aca7e5f9689f0cc5917ace8206f3ac693: Status 404 returned error can't find the container with id d63b2130d1c54016aea99ca89eb51d5aca7e5f9689f0cc5917ace8206f3ac693 Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.674517 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/48287337-b9e0-4ad0-8db3-00cf201ca4cf-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.674585 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/48287337-b9e0-4ad0-8db3-00cf201ca4cf-scripts\") pod \"ovsdbserver-nb-0\" (UID: 
\"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.674700 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-36fa4e8d-efd0-4c28-af6d-a0cbca208f62\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-36fa4e8d-efd0-4c28-af6d-a0cbca208f62\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.674751 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48287337-b9e0-4ad0-8db3-00cf201ca4cf-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.674778 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48287337-b9e0-4ad0-8db3-00cf201ca4cf-config\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.674810 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7j95m\" (UniqueName: \"kubernetes.io/projected/48287337-b9e0-4ad0-8db3-00cf201ca4cf-kube-api-access-7j95m\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.674874 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/48287337-b9e0-4ad0-8db3-00cf201ca4cf-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.674934 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/48287337-b9e0-4ad0-8db3-00cf201ca4cf-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.676525 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48287337-b9e0-4ad0-8db3-00cf201ca4cf-config\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.676885 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/48287337-b9e0-4ad0-8db3-00cf201ca4cf-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.680494 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/48287337-b9e0-4ad0-8db3-00cf201ca4cf-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.684593 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/48287337-b9e0-4ad0-8db3-00cf201ca4cf-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.686241 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.686274 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-36fa4e8d-efd0-4c28-af6d-a0cbca208f62\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-36fa4e8d-efd0-4c28-af6d-a0cbca208f62\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/7db1535094ab1a01f4d63de8b4935c74ee7b0b42e78b6224b49f7052c9fc4311/globalmount\"" pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.699539 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/48287337-b9e0-4ad0-8db3-00cf201ca4cf-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.702035 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7j95m\" (UniqueName: \"kubernetes.io/projected/48287337-b9e0-4ad0-8db3-00cf201ca4cf-kube-api-access-7j95m\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.703611 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/48287337-b9e0-4ad0-8db3-00cf201ca4cf-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.763955 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6c679b788d-zft7r" event={"ID":"1a65e44f-17f2-42b4-80f9-5f4f06368d7d","Type":"ContainerStarted","Data":"d63b2130d1c54016aea99ca89eb51d5aca7e5f9689f0cc5917ace8206f3ac693"} Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.769136 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"36256bdd-5ada-4651-8944-ed4c8978ed2c","Type":"ContainerStarted","Data":"4b2ecfd565282078feb900f4608502714c9c7ea947b3bf94111217c0b19823e1"} Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.772133 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-lwb9m" event={"ID":"38ca3e30-9962-4a70-8ca0-c96691111265","Type":"ContainerStarted","Data":"97a190ee13da3505b9148bdbf439503e4a7df8b97b0585874e718d332b3f17fd"} Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.794833 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-36fa4e8d-efd0-4c28-af6d-a0cbca208f62\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-36fa4e8d-efd0-4c28-af6d-a0cbca208f62\") pod \"ovsdbserver-nb-0\" (UID: \"48287337-b9e0-4ad0-8db3-00cf201ca4cf\") " pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:39 crc kubenswrapper[4877]: I0128 16:56:39.824060 4877 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.243609 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ggcj2"] Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.247598 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.251210 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-5cqmw" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.251433 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.251441 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.261747 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ggcj2"] Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.294962 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-vlhkh"] Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.301535 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.339022 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-vlhkh"] Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.394768 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97413ef2-e41a-453c-b6d1-5d93c7d72274-combined-ca-bundle\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.395075 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/97413ef2-e41a-453c-b6d1-5d93c7d72274-var-log-ovn\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.395293 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-var-log\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.395393 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/97413ef2-e41a-453c-b6d1-5d93c7d72274-var-run-ovn\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.395510 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-var-lib\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc 
kubenswrapper[4877]: I0128 16:56:40.395600 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/97413ef2-e41a-453c-b6d1-5d93c7d72274-ovn-controller-tls-certs\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.395912 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cc5k\" (UniqueName: \"kubernetes.io/projected/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-kube-api-access-7cc5k\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.396108 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-etc-ovs\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.396261 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sm52j\" (UniqueName: \"kubernetes.io/projected/97413ef2-e41a-453c-b6d1-5d93c7d72274-kube-api-access-sm52j\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.396310 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/97413ef2-e41a-453c-b6d1-5d93c7d72274-scripts\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.396403 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/97413ef2-e41a-453c-b6d1-5d93c7d72274-var-run\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.396497 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-var-run\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.396533 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-scripts\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.498813 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/97413ef2-e41a-453c-b6d1-5d93c7d72274-var-log-ovn\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.499371 
4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-var-log\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.499415 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/97413ef2-e41a-453c-b6d1-5d93c7d72274-var-run-ovn\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.499458 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-var-lib\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.499516 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/97413ef2-e41a-453c-b6d1-5d93c7d72274-ovn-controller-tls-certs\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.499612 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cc5k\" (UniqueName: \"kubernetes.io/projected/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-kube-api-access-7cc5k\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.499685 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-etc-ovs\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.499762 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sm52j\" (UniqueName: \"kubernetes.io/projected/97413ef2-e41a-453c-b6d1-5d93c7d72274-kube-api-access-sm52j\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.499788 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/97413ef2-e41a-453c-b6d1-5d93c7d72274-scripts\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.499854 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/97413ef2-e41a-453c-b6d1-5d93c7d72274-var-run\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.499883 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-var-run\") pod 
\"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.499909 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-scripts\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.500052 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97413ef2-e41a-453c-b6d1-5d93c7d72274-combined-ca-bundle\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.507339 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-etc-ovs\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.507498 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-var-log\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.507636 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/97413ef2-e41a-453c-b6d1-5d93c7d72274-var-log-ovn\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.507858 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/97413ef2-e41a-453c-b6d1-5d93c7d72274-var-run-ovn\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.508965 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-var-run\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.509198 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/97413ef2-e41a-453c-b6d1-5d93c7d72274-var-run\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.510327 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-scripts\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.510396 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/97413ef2-e41a-453c-b6d1-5d93c7d72274-scripts\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.511717 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-var-lib\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.516990 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97413ef2-e41a-453c-b6d1-5d93c7d72274-combined-ca-bundle\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.521074 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/97413ef2-e41a-453c-b6d1-5d93c7d72274-ovn-controller-tls-certs\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.524527 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sm52j\" (UniqueName: \"kubernetes.io/projected/97413ef2-e41a-453c-b6d1-5d93c7d72274-kube-api-access-sm52j\") pod \"ovn-controller-ggcj2\" (UID: \"97413ef2-e41a-453c-b6d1-5d93c7d72274\") " pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.530216 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cc5k\" (UniqueName: \"kubernetes.io/projected/2e2bc12a-dbc9-4053-a356-18b7a5c5a115-kube-api-access-7cc5k\") pod \"ovn-controller-ovs-vlhkh\" (UID: \"2e2bc12a-dbc9-4053-a356-18b7a5c5a115\") " pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.577145 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ggcj2" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.654325 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:56:40 crc kubenswrapper[4877]: I0128 16:56:40.830693 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6c679b788d-zft7r" event={"ID":"1a65e44f-17f2-42b4-80f9-5f4f06368d7d","Type":"ContainerStarted","Data":"865c8e6a5a0406f9f4833deef0ad987d155ab79daa209a438d5ab57ce01360f2"} Jan 28 16:56:41 crc kubenswrapper[4877]: I0128 16:56:41.298038 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ggcj2"] Jan 28 16:56:41 crc kubenswrapper[4877]: W0128 16:56:41.305876 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod97413ef2_e41a_453c_b6d1_5d93c7d72274.slice/crio-8fee433de170e7d7cb71ac751139405385df2f31383d216508e8cacf5d531852 WatchSource:0}: Error finding container 8fee433de170e7d7cb71ac751139405385df2f31383d216508e8cacf5d531852: Status 404 returned error can't find the container with id 8fee433de170e7d7cb71ac751139405385df2f31383d216508e8cacf5d531852 Jan 28 16:56:41 crc kubenswrapper[4877]: I0128 16:56:41.898388 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ggcj2" event={"ID":"97413ef2-e41a-453c-b6d1-5d93c7d72274","Type":"ContainerStarted","Data":"8fee433de170e7d7cb71ac751139405385df2f31383d216508e8cacf5d531852"} Jan 28 16:56:41 crc kubenswrapper[4877]: I0128 16:56:41.933433 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6c679b788d-zft7r" podStartSLOduration=4.933410211 podStartE2EDuration="4.933410211s" podCreationTimestamp="2026-01-28 16:56:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:56:41.918265976 +0000 UTC m=+1305.476592864" watchObservedRunningTime="2026-01-28 16:56:41.933410211 +0000 UTC m=+1305.491737099" Jan 28 16:56:42 crc kubenswrapper[4877]: I0128 16:56:42.497852 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.191568 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-vlhkh"] Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.347152 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.349517 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.352319 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.352610 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.352666 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.352795 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-wj6qd" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.356256 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.496351 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/813d900a-8a7c-4cef-b418-3a1f5eb28f69-config\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.496440 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/813d900a-8a7c-4cef-b418-3a1f5eb28f69-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.496492 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/813d900a-8a7c-4cef-b418-3a1f5eb28f69-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.496527 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/813d900a-8a7c-4cef-b418-3a1f5eb28f69-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.496760 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/813d900a-8a7c-4cef-b418-3a1f5eb28f69-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.496872 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6tb9\" (UniqueName: \"kubernetes.io/projected/813d900a-8a7c-4cef-b418-3a1f5eb28f69-kube-api-access-n6tb9\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.496939 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-53dda0ce-f78a-454f-936b-8b2ede36df27\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-53dda0ce-f78a-454f-936b-8b2ede36df27\") pod 
\"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.496982 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/813d900a-8a7c-4cef-b418-3a1f5eb28f69-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.599979 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6tb9\" (UniqueName: \"kubernetes.io/projected/813d900a-8a7c-4cef-b418-3a1f5eb28f69-kube-api-access-n6tb9\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.600089 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-53dda0ce-f78a-454f-936b-8b2ede36df27\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-53dda0ce-f78a-454f-936b-8b2ede36df27\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.600265 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/813d900a-8a7c-4cef-b418-3a1f5eb28f69-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.600431 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/813d900a-8a7c-4cef-b418-3a1f5eb28f69-config\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.601677 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/813d900a-8a7c-4cef-b418-3a1f5eb28f69-config\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.601778 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/813d900a-8a7c-4cef-b418-3a1f5eb28f69-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.601810 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/813d900a-8a7c-4cef-b418-3a1f5eb28f69-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.601854 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/813d900a-8a7c-4cef-b418-3a1f5eb28f69-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.602573 4877 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/813d900a-8a7c-4cef-b418-3a1f5eb28f69-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.604248 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/813d900a-8a7c-4cef-b418-3a1f5eb28f69-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.604697 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/813d900a-8a7c-4cef-b418-3a1f5eb28f69-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.606508 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.606652 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-53dda0ce-f78a-454f-936b-8b2ede36df27\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-53dda0ce-f78a-454f-936b-8b2ede36df27\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e92f4ed572aa277c8dd361775c9a8c3c4915ea4d990118690a88b6b2f7e1b262/globalmount\"" pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.614109 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/813d900a-8a7c-4cef-b418-3a1f5eb28f69-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.623588 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/813d900a-8a7c-4cef-b418-3a1f5eb28f69-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.624983 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/813d900a-8a7c-4cef-b418-3a1f5eb28f69-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.641466 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6tb9\" (UniqueName: \"kubernetes.io/projected/813d900a-8a7c-4cef-b418-3a1f5eb28f69-kube-api-access-n6tb9\") pod \"ovsdbserver-sb-0\" (UID: \"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.677787 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-53dda0ce-f78a-454f-936b-8b2ede36df27\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-53dda0ce-f78a-454f-936b-8b2ede36df27\") pod \"ovsdbserver-sb-0\" (UID: 
\"813d900a-8a7c-4cef-b418-3a1f5eb28f69\") " pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:43 crc kubenswrapper[4877]: I0128 16:56:43.707540 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 28 16:56:47 crc kubenswrapper[4877]: I0128 16:56:47.999387 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:47 crc kubenswrapper[4877]: I0128 16:56:47.999834 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:48 crc kubenswrapper[4877]: I0128 16:56:48.005136 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:48 crc kubenswrapper[4877]: I0128 16:56:48.989214 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6c679b788d-zft7r" Jan 28 16:56:49 crc kubenswrapper[4877]: I0128 16:56:49.065745 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-5dbb47b56f-hd7gx"] Jan 28 16:56:50 crc kubenswrapper[4877]: I0128 16:56:50.447817 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 16:56:50 crc kubenswrapper[4877]: I0128 16:56:50.448161 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 16:56:54 crc kubenswrapper[4877]: W0128 16:56:54.537713 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod48287337_b9e0_4ad0_8db3_00cf201ca4cf.slice/crio-593bcd1ae014e1394fb3e115c6a4887908196f06120b586b2cf58ac8a1bde91b WatchSource:0}: Error finding container 593bcd1ae014e1394fb3e115c6a4887908196f06120b586b2cf58ac8a1bde91b: Status 404 returned error can't find the container with id 593bcd1ae014e1394fb3e115c6a4887908196f06120b586b2cf58ac8a1bde91b Jan 28 16:56:55 crc kubenswrapper[4877]: I0128 16:56:55.068749 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"48287337-b9e0-4ad0-8db3-00cf201ca4cf","Type":"ContainerStarted","Data":"593bcd1ae014e1394fb3e115c6a4887908196f06120b586b2cf58ac8a1bde91b"} Jan 28 16:56:55 crc kubenswrapper[4877]: I0128 16:56:55.070202 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vlhkh" event={"ID":"2e2bc12a-dbc9-4053-a356-18b7a5c5a115","Type":"ContainerStarted","Data":"c3091e3c289e53eecef42dc1b220dbc399a7452097edc0a3c7a4a308f89ac673"} Jan 28 16:56:59 crc kubenswrapper[4877]: E0128 16:56:59.502369 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Jan 28 16:56:59 crc kubenswrapper[4877]: E0128 16:56:59.503138 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7wbrp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-1_openstack(2f642a61-430e-4dfc-b6b6-3ee68161eaf6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:56:59 crc kubenswrapper[4877]: E0128 16:56:59.502792 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Jan 28 16:56:59 crc kubenswrapper[4877]: E0128 16:56:59.503835 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > 
/var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-djqhq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-2_openstack(d96b5016-3ed4-4f98-8708-f69092894981): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:56:59 crc kubenswrapper[4877]: E0128 16:56:59.504826 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-1" podUID="2f642a61-430e-4dfc-b6b6-3ee68161eaf6" Jan 28 16:56:59 crc kubenswrapper[4877]: E0128 16:56:59.505906 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-2" podUID="d96b5016-3ed4-4f98-8708-f69092894981" Jan 28 16:57:00 crc kubenswrapper[4877]: E0128 16:57:00.149611 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-2" podUID="d96b5016-3ed4-4f98-8708-f69092894981" Jan 28 16:57:00 
crc kubenswrapper[4877]: E0128 16:57:00.150353 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-1" podUID="2f642a61-430e-4dfc-b6b6-3ee68161eaf6" Jan 28 16:57:04 crc kubenswrapper[4877]: E0128 16:57:04.717631 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Jan 28 16:57:04 crc kubenswrapper[4877]: E0128 16:57:04.717455 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Jan 28 16:57:04 crc kubenswrapper[4877]: E0128 16:57:04.718304 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bbpmr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(951f6a86-2dbc-402b-bb10-9a16d347c697): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:57:04 crc kubenswrapper[4877]: E0128 16:57:04.718388 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pkp5p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(5d261b3a-c6f9-48bd-92de-b76d3821e778): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:57:04 crc kubenswrapper[4877]: E0128 16:57:04.719626 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="5d261b3a-c6f9-48bd-92de-b76d3821e778" Jan 28 16:57:04 crc kubenswrapper[4877]: E0128 16:57:04.719656 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="951f6a86-2dbc-402b-bb10-9a16d347c697" Jan 28 16:57:05 crc kubenswrapper[4877]: E0128 16:57:05.011190 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:9a2097bc5b2e02bc1703f64c452ce8fe4bc6775b732db930ff4770b76ae4653a" Jan 28 16:57:05 crc kubenswrapper[4877]: E0128 16:57:05.011450 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init-config-reloader,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:9a2097bc5b2e02bc1703f64c452ce8fe4bc6775b732db930ff4770b76ae4653a,Command:[/bin/prometheus-config-reloader],Args:[--watch-interval=0 --listen-address=:8081 
--config-file=/etc/prometheus/config/prometheus.yaml.gz --config-envsubst-file=/etc/prometheus/config_out/prometheus.env.yaml --watched-dir=/etc/prometheus/rules/prometheus-metric-storage-rulefiles-0 --watched-dir=/etc/prometheus/rules/prometheus-metric-storage-rulefiles-1 --watched-dir=/etc/prometheus/rules/prometheus-metric-storage-rulefiles-2],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:reloader-init,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:SHARD,Value:0,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/prometheus/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-out,ReadOnly:false,MountPath:/etc/prometheus/config_out,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:prometheus-metric-storage-rulefiles-0,ReadOnly:false,MountPath:/etc/prometheus/rules/prometheus-metric-storage-rulefiles-0,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:prometheus-metric-storage-rulefiles-1,ReadOnly:false,MountPath:/etc/prometheus/rules/prometheus-metric-storage-rulefiles-1,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:prometheus-metric-storage-rulefiles-2,ReadOnly:false,MountPath:/etc/prometheus/rules/prometheus-metric-storage-rulefiles-2,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9g9ck,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod prometheus-metric-storage-0_openstack(36256bdd-5ada-4651-8944-ed4c8978ed2c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 16:57:05 crc kubenswrapper[4877]: E0128 16:57:05.012723 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init-config-reloader\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/prometheus-metric-storage-0" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" Jan 28 16:57:05 crc kubenswrapper[4877]: E0128 16:57:05.207651 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init-config-reloader\" with 
ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:9a2097bc5b2e02bc1703f64c452ce8fe4bc6775b732db930ff4770b76ae4653a\\\"\"" pod="openstack/prometheus-metric-storage-0" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" Jan 28 16:57:05 crc kubenswrapper[4877]: E0128 16:57:05.210601 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="5d261b3a-c6f9-48bd-92de-b76d3821e778" Jan 28 16:57:05 crc kubenswrapper[4877]: E0128 16:57:05.210916 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="951f6a86-2dbc-402b-bb10-9a16d347c697" Jan 28 16:57:06 crc kubenswrapper[4877]: E0128 16:57:06.946334 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Jan 28 16:57:06 crc kubenswrapper[4877]: E0128 16:57:06.946848 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-r6m7t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
openstack-galera-0_openstack(6cf870d4-330c-490b-8fcc-77028d084de4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:57:06 crc kubenswrapper[4877]: E0128 16:57:06.948563 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="6cf870d4-330c-490b-8fcc-77028d084de4" Jan 28 16:57:07 crc kubenswrapper[4877]: I0128 16:57:07.076198 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:57:07 crc kubenswrapper[4877]: I0128 16:57:07.076277 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:57:07 crc kubenswrapper[4877]: I0128 16:57:07.076345 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 16:57:07 crc kubenswrapper[4877]: I0128 16:57:07.077385 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f756cfb15c7c947e0f669ee8051d31638e5edc388c7a044a2e8411c49dfcce24"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 16:57:07 crc kubenswrapper[4877]: I0128 16:57:07.077446 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" containerID="cri-o://f756cfb15c7c947e0f669ee8051d31638e5edc388c7a044a2e8411c49dfcce24" gracePeriod=600 Jan 28 16:57:07 crc kubenswrapper[4877]: I0128 16:57:07.227081 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="f756cfb15c7c947e0f669ee8051d31638e5edc388c7a044a2e8411c49dfcce24" exitCode=0 Jan 28 16:57:07 crc kubenswrapper[4877]: I0128 16:57:07.227171 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"f756cfb15c7c947e0f669ee8051d31638e5edc388c7a044a2e8411c49dfcce24"} Jan 28 16:57:07 crc kubenswrapper[4877]: I0128 16:57:07.227249 4877 scope.go:117] "RemoveContainer" containerID="7c4a3e8fea9bc1e25b572220a93b200bdf216a51c64b746a15d1ef6b91b206c8" Jan 28 16:57:07 crc kubenswrapper[4877]: E0128 16:57:07.229798 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="6cf870d4-330c-490b-8fcc-77028d084de4" Jan 28 16:57:07 crc kubenswrapper[4877]: E0128 16:57:07.796439 4877 log.go:32] "PullImage from 
image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 28 16:57:07 crc kubenswrapper[4877]: E0128 16:57:07.797034 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zgrgt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-4xfmb_openstack(25758352-6c0a-4bee-b445-048c62593ca2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:57:07 crc kubenswrapper[4877]: E0128 16:57:07.799548 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-4xfmb" podUID="25758352-6c0a-4bee-b445-048c62593ca2" Jan 28 16:57:07 crc kubenswrapper[4877]: E0128 16:57:07.806648 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Jan 28 16:57:07 crc kubenswrapper[4877]: E0128 16:57:07.806865 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g42kv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:57:07 crc kubenswrapper[4877]: E0128 16:57:07.808073 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c" Jan 28 16:57:08 crc kubenswrapper[4877]: E0128 16:57:08.288109 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 28 16:57:08 crc kubenswrapper[4877]: E0128 16:57:08.288330 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-256pn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-xlsv5_openstack(94736b29-94ba-4356-8b0c-439047f52fdc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:57:08 crc kubenswrapper[4877]: E0128 16:57:08.289685 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-xlsv5" podUID="94736b29-94ba-4356-8b0c-439047f52fdc" Jan 28 16:57:08 crc kubenswrapper[4877]: E0128 16:57:08.317973 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c" Jan 28 16:57:09 crc kubenswrapper[4877]: E0128 16:57:09.245970 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-xlsv5" podUID="94736b29-94ba-4356-8b0c-439047f52fdc" Jan 28 16:57:09 crc kubenswrapper[4877]: E0128 16:57:09.309779 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 28 16:57:09 crc kubenswrapper[4877]: E0128 16:57:09.310129 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qw7k4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-2h2hz_openstack(14053bcd-4d39-413f-96ac-b485f4cfecfe): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:57:09 crc kubenswrapper[4877]: E0128 16:57:09.311383 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-2h2hz" podUID="14053bcd-4d39-413f-96ac-b485f4cfecfe" Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.348760 4877 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.445859 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-4xfmb" Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.453115 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-2h2hz" Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.608355 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgrgt\" (UniqueName: \"kubernetes.io/projected/25758352-6c0a-4bee-b445-048c62593ca2-kube-api-access-zgrgt\") pod \"25758352-6c0a-4bee-b445-048c62593ca2\" (UID: \"25758352-6c0a-4bee-b445-048c62593ca2\") " Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.609839 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25758352-6c0a-4bee-b445-048c62593ca2-config\") pod \"25758352-6c0a-4bee-b445-048c62593ca2\" (UID: \"25758352-6c0a-4bee-b445-048c62593ca2\") " Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.609889 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qw7k4\" (UniqueName: \"kubernetes.io/projected/14053bcd-4d39-413f-96ac-b485f4cfecfe-kube-api-access-qw7k4\") pod \"14053bcd-4d39-413f-96ac-b485f4cfecfe\" (UID: \"14053bcd-4d39-413f-96ac-b485f4cfecfe\") " Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.610143 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14053bcd-4d39-413f-96ac-b485f4cfecfe-config\") pod \"14053bcd-4d39-413f-96ac-b485f4cfecfe\" (UID: \"14053bcd-4d39-413f-96ac-b485f4cfecfe\") " Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.610371 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25758352-6c0a-4bee-b445-048c62593ca2-config" (OuterVolumeSpecName: "config") pod "25758352-6c0a-4bee-b445-048c62593ca2" (UID: "25758352-6c0a-4bee-b445-048c62593ca2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.610404 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25758352-6c0a-4bee-b445-048c62593ca2-dns-svc\") pod \"25758352-6c0a-4bee-b445-048c62593ca2\" (UID: \"25758352-6c0a-4bee-b445-048c62593ca2\") " Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.610896 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25758352-6c0a-4bee-b445-048c62593ca2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "25758352-6c0a-4bee-b445-048c62593ca2" (UID: "25758352-6c0a-4bee-b445-048c62593ca2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.611022 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14053bcd-4d39-413f-96ac-b485f4cfecfe-config" (OuterVolumeSpecName: "config") pod "14053bcd-4d39-413f-96ac-b485f4cfecfe" (UID: "14053bcd-4d39-413f-96ac-b485f4cfecfe"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.611156 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25758352-6c0a-4bee-b445-048c62593ca2-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.611181 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25758352-6c0a-4bee-b445-048c62593ca2-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.611195 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14053bcd-4d39-413f-96ac-b485f4cfecfe-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.614900 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14053bcd-4d39-413f-96ac-b485f4cfecfe-kube-api-access-qw7k4" (OuterVolumeSpecName: "kube-api-access-qw7k4") pod "14053bcd-4d39-413f-96ac-b485f4cfecfe" (UID: "14053bcd-4d39-413f-96ac-b485f4cfecfe"). InnerVolumeSpecName "kube-api-access-qw7k4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.621567 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25758352-6c0a-4bee-b445-048c62593ca2-kube-api-access-zgrgt" (OuterVolumeSpecName: "kube-api-access-zgrgt") pod "25758352-6c0a-4bee-b445-048c62593ca2" (UID: "25758352-6c0a-4bee-b445-048c62593ca2"). InnerVolumeSpecName "kube-api-access-zgrgt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:57:12 crc kubenswrapper[4877]: E0128 16:57:12.707710 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified" Jan 28 16:57:12 crc kubenswrapper[4877]: E0128 16:57:12.707912 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovn-controller,Image:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,Command:[ovn-controller --pidfile unix:/run/openvswitch/db.sock --certificate=/etc/pki/tls/certs/ovndb.crt --private-key=/etc/pki/tls/private/ovndb.key 
--ca-cert=/etc/pki/tls/certs/ovndbca.crt],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n646h66bh675h687h66bhc7h68dh599h58dhc8h68dhbch8dh68fhf7h65dh87h5c9hc7h5dch68ch6hc9h64ch5cfhf5h669h87h574h566h8h5b9q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run-ovn,ReadOnly:false,MountPath:/var/run/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log-ovn,ReadOnly:false,MountPath:/var/log/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sm52j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_liveness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_readiness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/share/ovn/scripts/ovn-ctl stop_controller],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-ggcj2_openstack(97413ef2-e41a-453c-b6d1-5d93c7d72274): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:57:12 crc kubenswrapper[4877]: E0128 16:57:12.709271 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"ovn-controller\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-ggcj2" podUID="97413ef2-e41a-453c-b6d1-5d93c7d72274" Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.712571 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgrgt\" (UniqueName: \"kubernetes.io/projected/25758352-6c0a-4bee-b445-048c62593ca2-kube-api-access-zgrgt\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:12 crc kubenswrapper[4877]: I0128 16:57:12.712609 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qw7k4\" (UniqueName: \"kubernetes.io/projected/14053bcd-4d39-413f-96ac-b485f4cfecfe-kube-api-access-qw7k4\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:13 crc kubenswrapper[4877]: I0128 16:57:13.299395 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-4xfmb" Jan 28 16:57:13 crc kubenswrapper[4877]: I0128 16:57:13.299379 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-4xfmb" event={"ID":"25758352-6c0a-4bee-b445-048c62593ca2","Type":"ContainerDied","Data":"93aa0fd7f8b41aff82aa1e77d050ccc2f559c1f41b899722321f6484453ef709"} Jan 28 16:57:13 crc kubenswrapper[4877]: I0128 16:57:13.300958 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-2h2hz" event={"ID":"14053bcd-4d39-413f-96ac-b485f4cfecfe","Type":"ContainerDied","Data":"35bd156f85056c0be35ab7c9313febac1c09a1fca1e3fdcaa7663564f4a74bc8"} Jan 28 16:57:13 crc kubenswrapper[4877]: I0128 16:57:13.301019 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-2h2hz" Jan 28 16:57:13 crc kubenswrapper[4877]: E0128 16:57:13.302751 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified\\\"\"" pod="openstack/ovn-controller-ggcj2" podUID="97413ef2-e41a-453c-b6d1-5d93c7d72274" Jan 28 16:57:13 crc kubenswrapper[4877]: I0128 16:57:13.457300 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-2h2hz"] Jan 28 16:57:13 crc kubenswrapper[4877]: I0128 16:57:13.476322 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-2h2hz"] Jan 28 16:57:13 crc kubenswrapper[4877]: I0128 16:57:13.502984 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-4xfmb"] Jan 28 16:57:13 crc kubenswrapper[4877]: I0128 16:57:13.517388 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-4xfmb"] Jan 28 16:57:13 crc kubenswrapper[4877]: I0128 16:57:13.655453 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 28 16:57:13 crc kubenswrapper[4877]: W0128 16:57:13.810867 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod813d900a_8a7c_4cef_b418_3a1f5eb28f69.slice/crio-0ffb9b0d219886dfe8d2266a550314045df9adb13da84ade09b8b2f46cdd99b6 WatchSource:0}: Error finding container 0ffb9b0d219886dfe8d2266a550314045df9adb13da84ade09b8b2f46cdd99b6: Status 404 returned error can't find the container with id 0ffb9b0d219886dfe8d2266a550314045df9adb13da84ade09b8b2f46cdd99b6 Jan 28 16:57:14 crc 
kubenswrapper[4877]: I0128 16:57:14.132810 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-5dbb47b56f-hd7gx" podUID="d3001a64-58f6-4e84-bf0a-fa4f5889ecef" containerName="console" containerID="cri-o://a561fe02e6e73ad39a8b019daa59f68e508489df916fa2458e914548631defcc" gracePeriod=15 Jan 28 16:57:14 crc kubenswrapper[4877]: I0128 16:57:14.314671 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"813d900a-8a7c-4cef-b418-3a1f5eb28f69","Type":"ContainerStarted","Data":"0ffb9b0d219886dfe8d2266a550314045df9adb13da84ade09b8b2f46cdd99b6"} Jan 28 16:57:14 crc kubenswrapper[4877]: I0128 16:57:14.322462 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82"} Jan 28 16:57:14 crc kubenswrapper[4877]: I0128 16:57:14.349431 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5dbb47b56f-hd7gx_d3001a64-58f6-4e84-bf0a-fa4f5889ecef/console/0.log" Jan 28 16:57:14 crc kubenswrapper[4877]: I0128 16:57:14.349729 4877 generic.go:334] "Generic (PLEG): container finished" podID="d3001a64-58f6-4e84-bf0a-fa4f5889ecef" containerID="a561fe02e6e73ad39a8b019daa59f68e508489df916fa2458e914548631defcc" exitCode=2 Jan 28 16:57:14 crc kubenswrapper[4877]: I0128 16:57:14.349780 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5dbb47b56f-hd7gx" event={"ID":"d3001a64-58f6-4e84-bf0a-fa4f5889ecef","Type":"ContainerDied","Data":"a561fe02e6e73ad39a8b019daa59f68e508489df916fa2458e914548631defcc"} Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.249538 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5dbb47b56f-hd7gx_d3001a64-58f6-4e84-bf0a-fa4f5889ecef/console/0.log" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.250696 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.340757 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14053bcd-4d39-413f-96ac-b485f4cfecfe" path="/var/lib/kubelet/pods/14053bcd-4d39-413f-96ac-b485f4cfecfe/volumes" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.341270 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25758352-6c0a-4bee-b445-048c62593ca2" path="/var/lib/kubelet/pods/25758352-6c0a-4bee-b445-048c62593ca2/volumes" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.370348 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5dbb47b56f-hd7gx_d3001a64-58f6-4e84-bf0a-fa4f5889ecef/console/0.log" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.370550 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5dbb47b56f-hd7gx" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.390297 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-serving-cert\") pod \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.390411 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6255\" (UniqueName: \"kubernetes.io/projected/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-kube-api-access-f6255\") pod \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.390503 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-oauth-config\") pod \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.390545 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-oauth-serving-cert\") pod \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.390571 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-config\") pod \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.390600 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-service-ca\") pod \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.390789 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-trusted-ca-bundle\") pod \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\" (UID: \"d3001a64-58f6-4e84-bf0a-fa4f5889ecef\") " Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.392110 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-config" (OuterVolumeSpecName: "console-config") pod "d3001a64-58f6-4e84-bf0a-fa4f5889ecef" (UID: "d3001a64-58f6-4e84-bf0a-fa4f5889ecef"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.392190 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-service-ca" (OuterVolumeSpecName: "service-ca") pod "d3001a64-58f6-4e84-bf0a-fa4f5889ecef" (UID: "d3001a64-58f6-4e84-bf0a-fa4f5889ecef"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.392245 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "d3001a64-58f6-4e84-bf0a-fa4f5889ecef" (UID: "d3001a64-58f6-4e84-bf0a-fa4f5889ecef"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.392339 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "d3001a64-58f6-4e84-bf0a-fa4f5889ecef" (UID: "d3001a64-58f6-4e84-bf0a-fa4f5889ecef"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.397812 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-kube-api-access-f6255" (OuterVolumeSpecName: "kube-api-access-f6255") pod "d3001a64-58f6-4e84-bf0a-fa4f5889ecef" (UID: "d3001a64-58f6-4e84-bf0a-fa4f5889ecef"). InnerVolumeSpecName "kube-api-access-f6255". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.397868 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "d3001a64-58f6-4e84-bf0a-fa4f5889ecef" (UID: "d3001a64-58f6-4e84-bf0a-fa4f5889ecef"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.400935 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "d3001a64-58f6-4e84-bf0a-fa4f5889ecef" (UID: "d3001a64-58f6-4e84-bf0a-fa4f5889ecef"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.473735 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5dbb47b56f-hd7gx" event={"ID":"d3001a64-58f6-4e84-bf0a-fa4f5889ecef","Type":"ContainerDied","Data":"4db7c6ea2faf81099b4c20dcc99aa91cbcebea3968f8b19fc1154f7344074860"} Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.473824 4877 scope.go:117] "RemoveContainer" containerID="a561fe02e6e73ad39a8b019daa59f68e508489df916fa2458e914548631defcc" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.503182 4877 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.503211 4877 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.503227 4877 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.503238 4877 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.503250 4877 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.503261 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6255\" (UniqueName: \"kubernetes.io/projected/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-kube-api-access-f6255\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.503272 4877 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d3001a64-58f6-4e84-bf0a-fa4f5889ecef-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.737895 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-5dbb47b56f-hd7gx"] Jan 28 16:57:15 crc kubenswrapper[4877]: I0128 16:57:15.753666 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-5dbb47b56f-hd7gx"] Jan 28 16:57:16 crc kubenswrapper[4877]: I0128 16:57:16.388444 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"f8ef99d3-7153-4bd3-bc86-fe13ad891084","Type":"ContainerStarted","Data":"17a7abfed01b0aa5d04e2719908b249df8091f52ac2014a28174f22031acdeed"} Jan 28 16:57:16 crc kubenswrapper[4877]: I0128 16:57:16.390045 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 28 16:57:16 crc kubenswrapper[4877]: I0128 16:57:16.391782 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-lwb9m" 
event={"ID":"38ca3e30-9962-4a70-8ca0-c96691111265","Type":"ContainerStarted","Data":"2f95ea96294abaaae3ab206565f2c9f74a2a12ccddeee8281cd47403e55b4815"} Jan 28 16:57:16 crc kubenswrapper[4877]: I0128 16:57:16.394399 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"48287337-b9e0-4ad0-8db3-00cf201ca4cf","Type":"ContainerStarted","Data":"7627ffab3723132658940db6c719244f534d6719c0f571e8070b02369d28d507"} Jan 28 16:57:16 crc kubenswrapper[4877]: I0128 16:57:16.395996 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"08b23417-7d7c-4d16-85e0-4f06c5e9b314","Type":"ContainerStarted","Data":"cb9005be24322b55c269621c815b4ed453924a096386bfd30bb713d6ac3bad73"} Jan 28 16:57:16 crc kubenswrapper[4877]: I0128 16:57:16.396120 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 28 16:57:16 crc kubenswrapper[4877]: I0128 16:57:16.398220 4877 generic.go:334] "Generic (PLEG): container finished" podID="477ae0b4-6cca-42b1-b0fd-6e3bba2a3343" containerID="9e46c726f583b44c76d276563eba6656c2bf3e9b133df45aa51bb78c4d6e0846" exitCode=0 Jan 28 16:57:16 crc kubenswrapper[4877]: I0128 16:57:16.398298 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk" event={"ID":"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343","Type":"ContainerDied","Data":"9e46c726f583b44c76d276563eba6656c2bf3e9b133df45aa51bb78c4d6e0846"} Jan 28 16:57:16 crc kubenswrapper[4877]: I0128 16:57:16.401637 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vlhkh" event={"ID":"2e2bc12a-dbc9-4053-a356-18b7a5c5a115","Type":"ContainerStarted","Data":"f4208f5f6fe5194915ad19b2934f45a28fd5e2f7a07a8ad9ea9c68abb8776ff2"} Jan 28 16:57:16 crc kubenswrapper[4877]: I0128 16:57:16.426608 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=4.823481893 podStartE2EDuration="43.42658364s" podCreationTimestamp="2026-01-28 16:56:33 +0000 UTC" firstStartedPulling="2026-01-28 16:56:34.567570479 +0000 UTC m=+1298.125897367" lastFinishedPulling="2026-01-28 16:57:13.170672236 +0000 UTC m=+1336.728999114" observedRunningTime="2026-01-28 16:57:16.416773897 +0000 UTC m=+1339.975100775" watchObservedRunningTime="2026-01-28 16:57:16.42658364 +0000 UTC m=+1339.984910518" Jan 28 16:57:16 crc kubenswrapper[4877]: I0128 16:57:16.516596 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-ui-dashboards-66cbf594b5-lwb9m" podStartSLOduration=6.646093636 podStartE2EDuration="40.516566284s" podCreationTimestamp="2026-01-28 16:56:36 +0000 UTC" firstStartedPulling="2026-01-28 16:56:39.210598033 +0000 UTC m=+1302.768924921" lastFinishedPulling="2026-01-28 16:57:13.081070681 +0000 UTC m=+1336.639397569" observedRunningTime="2026-01-28 16:57:16.488204656 +0000 UTC m=+1340.046531564" watchObservedRunningTime="2026-01-28 16:57:16.516566284 +0000 UTC m=+1340.074893192" Jan 28 16:57:16 crc kubenswrapper[4877]: I0128 16:57:16.533502 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=4.35941654 podStartE2EDuration="41.533449355s" podCreationTimestamp="2026-01-28 16:56:35 +0000 UTC" firstStartedPulling="2026-01-28 16:56:37.886334122 +0000 UTC m=+1301.444661010" lastFinishedPulling="2026-01-28 16:57:15.060366937 +0000 UTC m=+1338.618693825" 
observedRunningTime="2026-01-28 16:57:16.515971469 +0000 UTC m=+1340.074298357" watchObservedRunningTime="2026-01-28 16:57:16.533449355 +0000 UTC m=+1340.091776243" Jan 28 16:57:17 crc kubenswrapper[4877]: I0128 16:57:17.346822 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3001a64-58f6-4e84-bf0a-fa4f5889ecef" path="/var/lib/kubelet/pods/d3001a64-58f6-4e84-bf0a-fa4f5889ecef/volumes" Jan 28 16:57:17 crc kubenswrapper[4877]: I0128 16:57:17.413015 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"2f642a61-430e-4dfc-b6b6-3ee68161eaf6","Type":"ContainerStarted","Data":"c54362878b34558cb31609a92d4a2216db79c8bb56753c66aa44c767d855a310"} Jan 28 16:57:17 crc kubenswrapper[4877]: I0128 16:57:17.415890 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"813d900a-8a7c-4cef-b418-3a1f5eb28f69","Type":"ContainerStarted","Data":"01f3602a5db48ac8db57672c80f191cc4b6af7d23b49700a78e5ed81ffbb8bda"} Jan 28 16:57:17 crc kubenswrapper[4877]: I0128 16:57:17.419563 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk" event={"ID":"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343","Type":"ContainerStarted","Data":"67f211e7bc66c98c3ae3e2225ebb9e27f31f6723c005f67712f924edd3b8d1e3"} Jan 28 16:57:17 crc kubenswrapper[4877]: I0128 16:57:17.419686 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk" Jan 28 16:57:17 crc kubenswrapper[4877]: I0128 16:57:17.421948 4877 generic.go:334] "Generic (PLEG): container finished" podID="2e2bc12a-dbc9-4053-a356-18b7a5c5a115" containerID="f4208f5f6fe5194915ad19b2934f45a28fd5e2f7a07a8ad9ea9c68abb8776ff2" exitCode=0 Jan 28 16:57:17 crc kubenswrapper[4877]: I0128 16:57:17.422021 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vlhkh" event={"ID":"2e2bc12a-dbc9-4053-a356-18b7a5c5a115","Type":"ContainerDied","Data":"f4208f5f6fe5194915ad19b2934f45a28fd5e2f7a07a8ad9ea9c68abb8776ff2"} Jan 28 16:57:17 crc kubenswrapper[4877]: I0128 16:57:17.428404 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"d96b5016-3ed4-4f98-8708-f69092894981","Type":"ContainerStarted","Data":"52942a240b967e15d227027e6b2bf1fa3d2ce227a325fd22c64b791144cfc420"} Jan 28 16:57:17 crc kubenswrapper[4877]: I0128 16:57:17.491463 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk" podStartSLOduration=5.636475212 podStartE2EDuration="48.491437238s" podCreationTimestamp="2026-01-28 16:56:29 +0000 UTC" firstStartedPulling="2026-01-28 16:56:30.71909526 +0000 UTC m=+1294.277422148" lastFinishedPulling="2026-01-28 16:57:13.574057296 +0000 UTC m=+1337.132384174" observedRunningTime="2026-01-28 16:57:17.478608514 +0000 UTC m=+1341.036935402" watchObservedRunningTime="2026-01-28 16:57:17.491437238 +0000 UTC m=+1341.049764126" Jan 28 16:57:18 crc kubenswrapper[4877]: I0128 16:57:18.438103 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vlhkh" event={"ID":"2e2bc12a-dbc9-4053-a356-18b7a5c5a115","Type":"ContainerStarted","Data":"3f075b49008aea23e326d4d1d6685e2d8c814e324a82cace97541fbe496203f0"} Jan 28 16:57:19 crc kubenswrapper[4877]: I0128 16:57:19.450742 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vlhkh" 
event={"ID":"2e2bc12a-dbc9-4053-a356-18b7a5c5a115","Type":"ContainerStarted","Data":"ba12349be49de21af8f5c4ebcf1b27850b4e5de7ea3ecb491913a84923d32031"} Jan 28 16:57:19 crc kubenswrapper[4877]: I0128 16:57:19.451340 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:57:19 crc kubenswrapper[4877]: I0128 16:57:19.451365 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-vlhkh" Jan 28 16:57:19 crc kubenswrapper[4877]: I0128 16:57:19.453048 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"951f6a86-2dbc-402b-bb10-9a16d347c697","Type":"ContainerStarted","Data":"4814d6a51261f73fb6d1a6d3f616147b0069871f1e5588245254e80036c4c35d"} Jan 28 16:57:19 crc kubenswrapper[4877]: I0128 16:57:19.455456 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5d261b3a-c6f9-48bd-92de-b76d3821e778","Type":"ContainerStarted","Data":"8e0fb86256a77a22a05970915add645a2a85e79bc329b01962f4b68e24315021"} Jan 28 16:57:19 crc kubenswrapper[4877]: I0128 16:57:19.510114 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-vlhkh" podStartSLOduration=20.241417731 podStartE2EDuration="39.510093136s" podCreationTimestamp="2026-01-28 16:56:40 +0000 UTC" firstStartedPulling="2026-01-28 16:56:54.540494885 +0000 UTC m=+1318.098821783" lastFinishedPulling="2026-01-28 16:57:13.8091703 +0000 UTC m=+1337.367497188" observedRunningTime="2026-01-28 16:57:19.47767841 +0000 UTC m=+1343.036005298" watchObservedRunningTime="2026-01-28 16:57:19.510093136 +0000 UTC m=+1343.068420024" Jan 28 16:57:23 crc kubenswrapper[4877]: I0128 16:57:23.947098 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.290991 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-4hfbx"] Jan 28 16:57:24 crc kubenswrapper[4877]: E0128 16:57:24.291844 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3001a64-58f6-4e84-bf0a-fa4f5889ecef" containerName="console" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.291859 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3001a64-58f6-4e84-bf0a-fa4f5889ecef" containerName="console" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.292078 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3001a64-58f6-4e84-bf0a-fa4f5889ecef" containerName="console" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.292930 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.301427 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.312202 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-4hfbx"] Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.448457 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47m6f\" (UniqueName: \"kubernetes.io/projected/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-kube-api-access-47m6f\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.448530 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-ovn-rundir\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.448626 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-config\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.448868 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-combined-ca-bundle\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.449146 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-ovs-rundir\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.449308 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.518563 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-xlsv5" event={"ID":"94736b29-94ba-4356-8b0c-439047f52fdc","Type":"ContainerStarted","Data":"35bcf03f3551ef4102abffe6b71d6a260d9257863106c329a069d6591ce10497"} Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.551462 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-config\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 
16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.551581 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-combined-ca-bundle\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.551663 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-ovs-rundir\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.551730 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.551857 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47m6f\" (UniqueName: \"kubernetes.io/projected/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-kube-api-access-47m6f\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.551885 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-ovn-rundir\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.552033 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-ovs-rundir\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.552060 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-ovn-rundir\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.552432 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-config\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.571384 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.571713 4877 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-combined-ca-bundle\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.581035 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47m6f\" (UniqueName: \"kubernetes.io/projected/1dbf58c0-9bbf-43dd-8438-98c5f16c1146-kube-api-access-47m6f\") pod \"ovn-controller-metrics-4hfbx\" (UID: \"1dbf58c0-9bbf-43dd-8438-98c5f16c1146\") " pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.626741 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qvfkk"] Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.627025 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk" podUID="477ae0b4-6cca-42b1-b0fd-6e3bba2a3343" containerName="dnsmasq-dns" containerID="cri-o://67f211e7bc66c98c3ae3e2225ebb9e27f31f6723c005f67712f924edd3b8d1e3" gracePeriod=10 Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.628720 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.637205 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-4hfbx" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.756629 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-nrkf6"] Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.765566 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.775740 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.777733 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-nrkf6"] Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.862493 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-nrkf6\" (UID: \"3bf40370-b176-4eae-bab1-9f55f99132ce\") " pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.862558 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-nrkf6\" (UID: \"3bf40370-b176-4eae-bab1-9f55f99132ce\") " pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.862636 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgp55\" (UniqueName: \"kubernetes.io/projected/3bf40370-b176-4eae-bab1-9f55f99132ce-kube-api-access-lgp55\") pod \"dnsmasq-dns-7fd796d7df-nrkf6\" (UID: \"3bf40370-b176-4eae-bab1-9f55f99132ce\") " pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.863090 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-config\") pod \"dnsmasq-dns-7fd796d7df-nrkf6\" (UID: \"3bf40370-b176-4eae-bab1-9f55f99132ce\") " pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.957676 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-xlsv5"] Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.967544 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-nrkf6\" (UID: \"3bf40370-b176-4eae-bab1-9f55f99132ce\") " pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.967621 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-nrkf6\" (UID: \"3bf40370-b176-4eae-bab1-9f55f99132ce\") " pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.967762 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgp55\" (UniqueName: \"kubernetes.io/projected/3bf40370-b176-4eae-bab1-9f55f99132ce-kube-api-access-lgp55\") pod \"dnsmasq-dns-7fd796d7df-nrkf6\" (UID: \"3bf40370-b176-4eae-bab1-9f55f99132ce\") " pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.967843 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-config\") pod \"dnsmasq-dns-7fd796d7df-nrkf6\" (UID: \"3bf40370-b176-4eae-bab1-9f55f99132ce\") " pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.969110 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-config\") pod \"dnsmasq-dns-7fd796d7df-nrkf6\" (UID: \"3bf40370-b176-4eae-bab1-9f55f99132ce\") " pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.969388 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-nrkf6\" (UID: \"3bf40370-b176-4eae-bab1-9f55f99132ce\") " pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" Jan 28 16:57:24 crc kubenswrapper[4877]: I0128 16:57:24.970276 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-nrkf6\" (UID: \"3bf40370-b176-4eae-bab1-9f55f99132ce\") " pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.006441 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-g7gld"] Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.006794 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgp55\" (UniqueName: \"kubernetes.io/projected/3bf40370-b176-4eae-bab1-9f55f99132ce-kube-api-access-lgp55\") pod \"dnsmasq-dns-7fd796d7df-nrkf6\" (UID: \"3bf40370-b176-4eae-bab1-9f55f99132ce\") " pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.008440 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.011873 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.023348 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-g7gld"] Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.070135 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-config\") pod \"dnsmasq-dns-86db49b7ff-g7gld\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.070181 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhlnx\" (UniqueName: \"kubernetes.io/projected/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-kube-api-access-qhlnx\") pod \"dnsmasq-dns-86db49b7ff-g7gld\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.079309 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk" podUID="477ae0b4-6cca-42b1-b0fd-6e3bba2a3343" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: connect: connection refused" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.080230 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-g7gld\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.080507 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-g7gld\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.081158 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-g7gld\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.126903 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.184392 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-g7gld\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.184511 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-config\") pod \"dnsmasq-dns-86db49b7ff-g7gld\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.184549 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhlnx\" (UniqueName: \"kubernetes.io/projected/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-kube-api-access-qhlnx\") pod \"dnsmasq-dns-86db49b7ff-g7gld\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.184724 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-g7gld\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.184769 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-g7gld\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.185538 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-g7gld\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.186101 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-g7gld\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.186801 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-config\") pod \"dnsmasq-dns-86db49b7ff-g7gld\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.188512 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-g7gld\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 
16:57:25.210786 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhlnx\" (UniqueName: \"kubernetes.io/projected/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-kube-api-access-qhlnx\") pod \"dnsmasq-dns-86db49b7ff-g7gld\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.355713 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.508579 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-4hfbx"] Jan 28 16:57:25 crc kubenswrapper[4877]: W0128 16:57:25.514314 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1dbf58c0_9bbf_43dd_8438_98c5f16c1146.slice/crio-cc81b4635ac7fceb878c9c2de2d12008ef88696f084ba0fef08d15e4a6d8bc6f WatchSource:0}: Error finding container cc81b4635ac7fceb878c9c2de2d12008ef88696f084ba0fef08d15e4a6d8bc6f: Status 404 returned error can't find the container with id cc81b4635ac7fceb878c9c2de2d12008ef88696f084ba0fef08d15e4a6d8bc6f Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.534172 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-4hfbx" event={"ID":"1dbf58c0-9bbf-43dd-8438-98c5f16c1146","Type":"ContainerStarted","Data":"cc81b4635ac7fceb878c9c2de2d12008ef88696f084ba0fef08d15e4a6d8bc6f"} Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.752766 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-nrkf6"] Jan 28 16:57:25 crc kubenswrapper[4877]: I0128 16:57:25.901931 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-g7gld"] Jan 28 16:57:26 crc kubenswrapper[4877]: I0128 16:57:26.233241 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 28 16:57:26 crc kubenswrapper[4877]: I0128 16:57:26.591298 4877 generic.go:334] "Generic (PLEG): container finished" podID="94736b29-94ba-4356-8b0c-439047f52fdc" containerID="35bcf03f3551ef4102abffe6b71d6a260d9257863106c329a069d6591ce10497" exitCode=0 Jan 28 16:57:26 crc kubenswrapper[4877]: I0128 16:57:26.591452 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-xlsv5" event={"ID":"94736b29-94ba-4356-8b0c-439047f52fdc","Type":"ContainerDied","Data":"35bcf03f3551ef4102abffe6b71d6a260d9257863106c329a069d6591ce10497"} Jan 28 16:57:26 crc kubenswrapper[4877]: I0128 16:57:26.623067 4877 generic.go:334] "Generic (PLEG): container finished" podID="477ae0b4-6cca-42b1-b0fd-6e3bba2a3343" containerID="67f211e7bc66c98c3ae3e2225ebb9e27f31f6723c005f67712f924edd3b8d1e3" exitCode=0 Jan 28 16:57:26 crc kubenswrapper[4877]: I0128 16:57:26.623147 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk" event={"ID":"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343","Type":"ContainerDied","Data":"67f211e7bc66c98c3ae3e2225ebb9e27f31f6723c005f67712f924edd3b8d1e3"} Jan 28 16:57:26 crc kubenswrapper[4877]: I0128 16:57:26.681693 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-nrkf6"] Jan 28 16:57:26 crc kubenswrapper[4877]: I0128 16:57:26.803574 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-2nqdn"] Jan 28 16:57:26 crc 
kubenswrapper[4877]: I0128 16:57:26.806254 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:26 crc kubenswrapper[4877]: I0128 16:57:26.852046 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2nqdn"] Jan 28 16:57:26 crc kubenswrapper[4877]: I0128 16:57:26.964594 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-dns-svc\") pod \"dnsmasq-dns-698758b865-2nqdn\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") " pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:26 crc kubenswrapper[4877]: I0128 16:57:26.964688 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-config\") pod \"dnsmasq-dns-698758b865-2nqdn\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") " pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:26 crc kubenswrapper[4877]: I0128 16:57:26.964832 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-2nqdn\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") " pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:26 crc kubenswrapper[4877]: I0128 16:57:26.964858 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrq48\" (UniqueName: \"kubernetes.io/projected/88801c74-cfbd-4eee-936b-2899b69196aa-kube-api-access-mrq48\") pod \"dnsmasq-dns-698758b865-2nqdn\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") " pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:26 crc kubenswrapper[4877]: I0128 16:57:26.964921 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-2nqdn\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") " pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.073305 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-2nqdn\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") " pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.073470 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-dns-svc\") pod \"dnsmasq-dns-698758b865-2nqdn\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") " pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.073532 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-config\") pod \"dnsmasq-dns-698758b865-2nqdn\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") " pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.073605 
4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-2nqdn\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") " pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.073626 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrq48\" (UniqueName: \"kubernetes.io/projected/88801c74-cfbd-4eee-936b-2899b69196aa-kube-api-access-mrq48\") pod \"dnsmasq-dns-698758b865-2nqdn\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") " pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.074548 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-2nqdn\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") " pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.074971 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-config\") pod \"dnsmasq-dns-698758b865-2nqdn\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") " pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.079284 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-2nqdn\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") " pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.079542 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-dns-svc\") pod \"dnsmasq-dns-698758b865-2nqdn\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") " pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.165074 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrq48\" (UniqueName: \"kubernetes.io/projected/88801c74-cfbd-4eee-936b-2899b69196aa-kube-api-access-mrq48\") pod \"dnsmasq-dns-698758b865-2nqdn\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") " pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.188371 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.936231 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.947445 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.951135 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.951795 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.951795 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-5xs89" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.952237 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 28 16:57:27 crc kubenswrapper[4877]: I0128 16:57:27.974289 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.107385 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.107446 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03f31376-451f-4360-bea2-4d4e557568f0-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.107516 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xl2l\" (UniqueName: \"kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-kube-api-access-6xl2l\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.107545 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/03f31376-451f-4360-bea2-4d4e557568f0-cache\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.107603 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d72789a4-1ac0-42aa-92c2-3de29d259cd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d72789a4-1ac0-42aa-92c2-3de29d259cd5\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.107723 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/03f31376-451f-4360-bea2-4d4e557568f0-lock\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.212203 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/03f31376-451f-4360-bea2-4d4e557568f0-lock\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc 
kubenswrapper[4877]: I0128 16:57:28.212645 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.212739 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03f31376-451f-4360-bea2-4d4e557568f0-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.212803 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xl2l\" (UniqueName: \"kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-kube-api-access-6xl2l\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.212820 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/03f31376-451f-4360-bea2-4d4e557568f0-cache\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.212874 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d72789a4-1ac0-42aa-92c2-3de29d259cd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d72789a4-1ac0-42aa-92c2-3de29d259cd5\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.212995 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/03f31376-451f-4360-bea2-4d4e557568f0-lock\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: E0128 16:57:28.213132 4877 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 16:57:28 crc kubenswrapper[4877]: E0128 16:57:28.213379 4877 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 16:57:28 crc kubenswrapper[4877]: E0128 16:57:28.213546 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift podName:03f31376-451f-4360-bea2-4d4e557568f0 nodeName:}" failed. No retries permitted until 2026-01-28 16:57:28.713482075 +0000 UTC m=+1352.271808963 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift") pod "swift-storage-0" (UID: "03f31376-451f-4360-bea2-4d4e557568f0") : configmap "swift-ring-files" not found Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.213697 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/03f31376-451f-4360-bea2-4d4e557568f0-cache\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.220435 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03f31376-451f-4360-bea2-4d4e557568f0-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.221703 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.221762 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d72789a4-1ac0-42aa-92c2-3de29d259cd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d72789a4-1ac0-42aa-92c2-3de29d259cd5\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9bd04c3c1360ba8613b4e79ad3ac86bc07fb991a8f3eedcd8f86fd1920e71692/globalmount\"" pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.233370 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xl2l\" (UniqueName: \"kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-kube-api-access-6xl2l\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.287074 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d72789a4-1ac0-42aa-92c2-3de29d259cd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d72789a4-1ac0-42aa-92c2-3de29d259cd5\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: I0128 16:57:28.729734 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:28 crc kubenswrapper[4877]: E0128 16:57:28.730117 4877 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 16:57:28 crc kubenswrapper[4877]: E0128 16:57:28.730176 4877 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 16:57:28 crc kubenswrapper[4877]: E0128 16:57:28.730269 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift podName:03f31376-451f-4360-bea2-4d4e557568f0 nodeName:}" failed. 
No retries permitted until 2026-01-28 16:57:29.730246956 +0000 UTC m=+1353.288573844 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift") pod "swift-storage-0" (UID: "03f31376-451f-4360-bea2-4d4e557568f0") : configmap "swift-ring-files" not found Jan 28 16:57:29 crc kubenswrapper[4877]: I0128 16:57:29.755403 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:29 crc kubenswrapper[4877]: E0128 16:57:29.755619 4877 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 16:57:29 crc kubenswrapper[4877]: E0128 16:57:29.755660 4877 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 16:57:29 crc kubenswrapper[4877]: E0128 16:57:29.755736 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift podName:03f31376-451f-4360-bea2-4d4e557568f0 nodeName:}" failed. No retries permitted until 2026-01-28 16:57:31.755713642 +0000 UTC m=+1355.314040520 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift") pod "swift-storage-0" (UID: "03f31376-451f-4360-bea2-4d4e557568f0") : configmap "swift-ring-files" not found Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.628380 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-sp2zz"] Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.630729 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.633769 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.636886 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.637048 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.646013 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-sp2zz"] Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.702747 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-combined-ca-bundle\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.702866 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f8872319-aeb2-4f15-bfc5-3e9abd62770e-ring-data-devices\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.702919 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f8872319-aeb2-4f15-bfc5-3e9abd62770e-scripts\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.702950 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4h4sv\" (UniqueName: \"kubernetes.io/projected/f8872319-aeb2-4f15-bfc5-3e9abd62770e-kube-api-access-4h4sv\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.702975 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-swiftconf\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.702998 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-dispersionconf\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.703017 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f8872319-aeb2-4f15-bfc5-3e9abd62770e-etc-swift\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 
16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.805508 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f8872319-aeb2-4f15-bfc5-3e9abd62770e-ring-data-devices\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.805603 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f8872319-aeb2-4f15-bfc5-3e9abd62770e-scripts\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.805640 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4h4sv\" (UniqueName: \"kubernetes.io/projected/f8872319-aeb2-4f15-bfc5-3e9abd62770e-kube-api-access-4h4sv\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.805690 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-swiftconf\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.805712 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-dispersionconf\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.805939 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f8872319-aeb2-4f15-bfc5-3e9abd62770e-etc-swift\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.805990 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.806050 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-combined-ca-bundle\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:31 crc kubenswrapper[4877]: E0128 16:57:31.806535 4877 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 16:57:31 crc kubenswrapper[4877]: E0128 16:57:31.806652 4877 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 16:57:31 crc kubenswrapper[4877]: E0128 16:57:31.806778 4877 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift podName:03f31376-451f-4360-bea2-4d4e557568f0 nodeName:}" failed. No retries permitted until 2026-01-28 16:57:35.806756146 +0000 UTC m=+1359.365083034 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift") pod "swift-storage-0" (UID: "03f31376-451f-4360-bea2-4d4e557568f0") : configmap "swift-ring-files" not found Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.989994 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-swiftconf\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:31 crc kubenswrapper[4877]: I0128 16:57:31.991137 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f8872319-aeb2-4f15-bfc5-3e9abd62770e-scripts\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:32 crc kubenswrapper[4877]: I0128 16:57:32.010931 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-combined-ca-bundle\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:32 crc kubenswrapper[4877]: I0128 16:57:32.012310 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-dispersionconf\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:32 crc kubenswrapper[4877]: I0128 16:57:32.017552 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f8872319-aeb2-4f15-bfc5-3e9abd62770e-etc-swift\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:32 crc kubenswrapper[4877]: I0128 16:57:32.017969 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f8872319-aeb2-4f15-bfc5-3e9abd62770e-ring-data-devices\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:32 crc kubenswrapper[4877]: I0128 16:57:32.019898 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4h4sv\" (UniqueName: \"kubernetes.io/projected/f8872319-aeb2-4f15-bfc5-3e9abd62770e-kube-api-access-4h4sv\") pod \"swift-ring-rebalance-sp2zz\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:32 crc kubenswrapper[4877]: I0128 16:57:32.316021 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:57:35 crc kubenswrapper[4877]: I0128 16:57:35.078162 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk" podUID="477ae0b4-6cca-42b1-b0fd-6e3bba2a3343" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: i/o timeout" Jan 28 16:57:35 crc kubenswrapper[4877]: I0128 16:57:35.908259 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:35 crc kubenswrapper[4877]: E0128 16:57:35.908503 4877 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 16:57:35 crc kubenswrapper[4877]: E0128 16:57:35.908712 4877 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 16:57:35 crc kubenswrapper[4877]: E0128 16:57:35.908800 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift podName:03f31376-451f-4360-bea2-4d4e557568f0 nodeName:}" failed. No retries permitted until 2026-01-28 16:57:43.908762712 +0000 UTC m=+1367.467089600 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift") pod "swift-storage-0" (UID: "03f31376-451f-4360-bea2-4d4e557568f0") : configmap "swift-ring-files" not found Jan 28 16:57:39 crc kubenswrapper[4877]: W0128 16:57:39.607565 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3bf40370_b176_4eae_bab1_9f55f99132ce.slice/crio-add725033abd8bfba774a07490a384e207d84934fa83911025545af94787f58b WatchSource:0}: Error finding container add725033abd8bfba774a07490a384e207d84934fa83911025545af94787f58b: Status 404 returned error can't find the container with id add725033abd8bfba774a07490a384e207d84934fa83911025545af94787f58b Jan 28 16:57:39 crc kubenswrapper[4877]: I0128 16:57:39.701457 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-xlsv5" Jan 28 16:57:39 crc kubenswrapper[4877]: I0128 16:57:39.776438 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" event={"ID":"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa","Type":"ContainerStarted","Data":"946f1c46593991addee26ba37d7d7bd35b507717226d8c269a0515933992a24f"} Jan 28 16:57:39 crc kubenswrapper[4877]: I0128 16:57:39.778437 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" event={"ID":"3bf40370-b176-4eae-bab1-9f55f99132ce","Type":"ContainerStarted","Data":"add725033abd8bfba774a07490a384e207d84934fa83911025545af94787f58b"} Jan 28 16:57:39 crc kubenswrapper[4877]: I0128 16:57:39.779853 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-xlsv5" event={"ID":"94736b29-94ba-4356-8b0c-439047f52fdc","Type":"ContainerDied","Data":"b451836f4aae9a838953d3edc482eb77013e7f3d5c2e4152718e23680e4ad5c3"} Jan 28 16:57:39 crc kubenswrapper[4877]: I0128 16:57:39.779905 4877 scope.go:117] "RemoveContainer" containerID="35bcf03f3551ef4102abffe6b71d6a260d9257863106c329a069d6591ce10497" Jan 28 16:57:39 crc kubenswrapper[4877]: I0128 16:57:39.779907 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-xlsv5" Jan 28 16:57:39 crc kubenswrapper[4877]: I0128 16:57:39.800360 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-256pn\" (UniqueName: \"kubernetes.io/projected/94736b29-94ba-4356-8b0c-439047f52fdc-kube-api-access-256pn\") pod \"94736b29-94ba-4356-8b0c-439047f52fdc\" (UID: \"94736b29-94ba-4356-8b0c-439047f52fdc\") " Jan 28 16:57:39 crc kubenswrapper[4877]: I0128 16:57:39.800437 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/94736b29-94ba-4356-8b0c-439047f52fdc-dns-svc\") pod \"94736b29-94ba-4356-8b0c-439047f52fdc\" (UID: \"94736b29-94ba-4356-8b0c-439047f52fdc\") " Jan 28 16:57:39 crc kubenswrapper[4877]: I0128 16:57:39.800637 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94736b29-94ba-4356-8b0c-439047f52fdc-config\") pod \"94736b29-94ba-4356-8b0c-439047f52fdc\" (UID: \"94736b29-94ba-4356-8b0c-439047f52fdc\") " Jan 28 16:57:39 crc kubenswrapper[4877]: I0128 16:57:39.808823 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94736b29-94ba-4356-8b0c-439047f52fdc-kube-api-access-256pn" (OuterVolumeSpecName: "kube-api-access-256pn") pod "94736b29-94ba-4356-8b0c-439047f52fdc" (UID: "94736b29-94ba-4356-8b0c-439047f52fdc"). InnerVolumeSpecName "kube-api-access-256pn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:57:39 crc kubenswrapper[4877]: I0128 16:57:39.825977 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94736b29-94ba-4356-8b0c-439047f52fdc-config" (OuterVolumeSpecName: "config") pod "94736b29-94ba-4356-8b0c-439047f52fdc" (UID: "94736b29-94ba-4356-8b0c-439047f52fdc"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:39 crc kubenswrapper[4877]: I0128 16:57:39.826767 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94736b29-94ba-4356-8b0c-439047f52fdc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "94736b29-94ba-4356-8b0c-439047f52fdc" (UID: "94736b29-94ba-4356-8b0c-439047f52fdc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:39 crc kubenswrapper[4877]: I0128 16:57:39.906000 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94736b29-94ba-4356-8b0c-439047f52fdc-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:39 crc kubenswrapper[4877]: I0128 16:57:39.906042 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-256pn\" (UniqueName: \"kubernetes.io/projected/94736b29-94ba-4356-8b0c-439047f52fdc-kube-api-access-256pn\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:39 crc kubenswrapper[4877]: I0128 16:57:39.906054 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/94736b29-94ba-4356-8b0c-439047f52fdc-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:39 crc kubenswrapper[4877]: I0128 16:57:39.959503 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk" Jan 28 16:57:40 crc kubenswrapper[4877]: I0128 16:57:40.078750 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk" podUID="477ae0b4-6cca-42b1-b0fd-6e3bba2a3343" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: i/o timeout" Jan 28 16:57:40 crc kubenswrapper[4877]: I0128 16:57:40.078911 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk" Jan 28 16:57:40 crc kubenswrapper[4877]: I0128 16:57:40.113659 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-dns-svc\") pod \"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343\" (UID: \"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343\") " Jan 28 16:57:40 crc kubenswrapper[4877]: I0128 16:57:40.113875 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-znzfc\" (UniqueName: \"kubernetes.io/projected/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-kube-api-access-znzfc\") pod \"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343\" (UID: \"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343\") " Jan 28 16:57:40 crc kubenswrapper[4877]: I0128 16:57:40.113915 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-config\") pod \"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343\" (UID: \"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343\") " Jan 28 16:57:40 crc kubenswrapper[4877]: I0128 16:57:40.124393 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-kube-api-access-znzfc" (OuterVolumeSpecName: "kube-api-access-znzfc") pod "477ae0b4-6cca-42b1-b0fd-6e3bba2a3343" (UID: "477ae0b4-6cca-42b1-b0fd-6e3bba2a3343"). InnerVolumeSpecName "kube-api-access-znzfc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:57:40 crc kubenswrapper[4877]: I0128 16:57:40.147998 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-xlsv5"] Jan 28 16:57:40 crc kubenswrapper[4877]: I0128 16:57:40.159855 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-xlsv5"] Jan 28 16:57:40 crc kubenswrapper[4877]: I0128 16:57:40.183000 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "477ae0b4-6cca-42b1-b0fd-6e3bba2a3343" (UID: "477ae0b4-6cca-42b1-b0fd-6e3bba2a3343"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:40 crc kubenswrapper[4877]: I0128 16:57:40.197571 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-config" (OuterVolumeSpecName: "config") pod "477ae0b4-6cca-42b1-b0fd-6e3bba2a3343" (UID: "477ae0b4-6cca-42b1-b0fd-6e3bba2a3343"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:40 crc kubenswrapper[4877]: I0128 16:57:40.216600 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:40 crc kubenswrapper[4877]: I0128 16:57:40.216640 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-znzfc\" (UniqueName: \"kubernetes.io/projected/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-kube-api-access-znzfc\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:40 crc kubenswrapper[4877]: I0128 16:57:40.216656 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:40 crc kubenswrapper[4877]: E0128 16:57:40.222817 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified" Jan 28 16:57:40 crc kubenswrapper[4877]: E0128 16:57:40.223007 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:openstack-network-exporter,Image:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,Command:[/app/openstack-network-exporter],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPENSTACK_NETWORK_EXPORTER_YAML,Value:/etc/config/openstack-network-exporter.yaml,ValueFrom:nil,},EnvVar{Name:CONFIG_HASH,Value:n546h5c6h555h5f7h5b5h59h8ch56bh666h5d9h6dh566h64dh646hd7h68bh54fh597h5c8h86h79h5fh6hf5hc9h5ddh5cfh664h57dh5d6h54fh5q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovnmetrics.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovnmetrics.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n6tb9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovsdbserver-sb-0_openstack(813d900a-8a7c-4cef-b418-3a1f5eb28f69): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:57:40 crc kubenswrapper[4877]: E0128 16:57:40.224323 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-sb-0" podUID="813d900a-8a7c-4cef-b418-3a1f5eb28f69" Jan 28 16:57:40 crc kubenswrapper[4877]: E0128 16:57:40.276641 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified" Jan 28 16:57:40 crc kubenswrapper[4877]: E0128 16:57:40.277227 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:openstack-network-exporter,Image:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,Command:[/app/openstack-network-exporter],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPENSTACK_NETWORK_EXPORTER_YAML,Value:/etc/config/openstack-network-exporter.yaml,ValueFrom:nil,},EnvVar{Name:CONFIG_HASH,Value:n556h78h5f4h687hd4h99hd4h94h668hbch55h656h59dhb6h86h64hb9h66ch55ch5c6h665h55dh694h648h568h55hbfh5cbh77hf9h65h9cq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovnmetrics.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovnmetrics.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7j95m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovsdbserver-nb-0_openstack(48287337-b9e0-4ad0-8db3-00cf201ca4cf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 16:57:40 crc kubenswrapper[4877]: E0128 16:57:40.278545 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-nb-0" podUID="48287337-b9e0-4ad0-8db3-00cf201ca4cf" Jan 28 16:57:40 crc kubenswrapper[4877]: I0128 16:57:40.797256 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk" Jan 28 16:57:40 crc kubenswrapper[4877]: I0128 16:57:40.797448 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-qvfkk" event={"ID":"477ae0b4-6cca-42b1-b0fd-6e3bba2a3343","Type":"ContainerDied","Data":"6671c185e5d92216f56323ffd65b7c95aa6ee6da353d04bc160923dbb5e5f565"} Jan 28 16:57:40 crc kubenswrapper[4877]: I0128 16:57:40.797699 4877 scope.go:117] "RemoveContainer" containerID="67f211e7bc66c98c3ae3e2225ebb9e27f31f6723c005f67712f924edd3b8d1e3" Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:40.921961 4877 scope.go:117] "RemoveContainer" containerID="9e46c726f583b44c76d276563eba6656c2bf3e9b133df45aa51bb78c4d6e0846" Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:40.938147 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qvfkk"] Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:40.953052 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qvfkk"] Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.009537 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-sp2zz"] Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.187290 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2nqdn"] Jan 28 16:57:41 crc kubenswrapper[4877]: W0128 16:57:41.258789 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88801c74_cfbd_4eee_936b_2899b69196aa.slice/crio-61a929767be35d841e2256976c5b376fd6c95210a37ea8d0beee52968e41668f WatchSource:0}: Error finding container 61a929767be35d841e2256976c5b376fd6c95210a37ea8d0beee52968e41668f: Status 404 returned error can't find the container with id 61a929767be35d841e2256976c5b376fd6c95210a37ea8d0beee52968e41668f Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.351998 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="477ae0b4-6cca-42b1-b0fd-6e3bba2a3343" path="/var/lib/kubelet/pods/477ae0b4-6cca-42b1-b0fd-6e3bba2a3343/volumes" Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.352786 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94736b29-94ba-4356-8b0c-439047f52fdc" path="/var/lib/kubelet/pods/94736b29-94ba-4356-8b0c-439047f52fdc/volumes" Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.808697 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-sp2zz" event={"ID":"f8872319-aeb2-4f15-bfc5-3e9abd62770e","Type":"ContainerStarted","Data":"713ff8386c5b9964c01045112e062041782b8750e74aad36568a14d8f4fcc302"} Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.810825 4877 generic.go:334] "Generic (PLEG): container finished" podID="798f6b4e-5b70-4d96-93bc-a6ddbae18eaa" containerID="04e0c1a3a7bb75708118420d020e8a25d27589cc1f9c7f1ae8beb17f007e9e79" exitCode=0 Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.810915 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" event={"ID":"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa","Type":"ContainerDied","Data":"04e0c1a3a7bb75708118420d020e8a25d27589cc1f9c7f1ae8beb17f007e9e79"} Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.814012 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-4hfbx" 
event={"ID":"1dbf58c0-9bbf-43dd-8438-98c5f16c1146","Type":"ContainerStarted","Data":"2b5eb8e5a92650711526f9028319b3eed98b699804ed0a778b27dc9d38c49cb3"} Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.816093 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ggcj2" event={"ID":"97413ef2-e41a-453c-b6d1-5d93c7d72274","Type":"ContainerStarted","Data":"eb9c1f1eed22ecee50d6e607e1902b64d58095fccb4a24807e48d707c2cc1a01"} Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.816306 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ggcj2" Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.818733 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2nqdn" event={"ID":"88801c74-cfbd-4eee-936b-2899b69196aa","Type":"ContainerStarted","Data":"61a929767be35d841e2256976c5b376fd6c95210a37ea8d0beee52968e41668f"} Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.822099 4877 generic.go:334] "Generic (PLEG): container finished" podID="3bf40370-b176-4eae-bab1-9f55f99132ce" containerID="941825cc5e05cd43d1b67d5219626644b19c1276c27ef476ac16114c25464a91" exitCode=0 Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.822154 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" event={"ID":"3bf40370-b176-4eae-bab1-9f55f99132ce","Type":"ContainerDied","Data":"941825cc5e05cd43d1b67d5219626644b19c1276c27ef476ac16114c25464a91"} Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.841511 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"813d900a-8a7c-4cef-b418-3a1f5eb28f69","Type":"ContainerStarted","Data":"9a977e6bf328d41a550920253c209ca69b7720733f5b67458bcc1ec5f9c2c16f"} Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.846608 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"6cf870d4-330c-490b-8fcc-77028d084de4","Type":"ContainerStarted","Data":"4cd2ccacff1d57aa9c2c97b5c1b2e292ea115ad3b38c226993097be4c2d99aa0"} Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.855102 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c","Type":"ContainerStarted","Data":"344654039e6da001213d4d8b96780f76b0a56f7fdd933e538efad2f66f0c421c"} Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.874903 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-4hfbx" podStartSLOduration=9.15311826 podStartE2EDuration="17.874855938s" podCreationTimestamp="2026-01-28 16:57:24 +0000 UTC" firstStartedPulling="2026-01-28 16:57:32.00035734 +0000 UTC m=+1355.558684248" lastFinishedPulling="2026-01-28 16:57:40.722095038 +0000 UTC m=+1364.280421926" observedRunningTime="2026-01-28 16:57:41.872250069 +0000 UTC m=+1365.430576997" watchObservedRunningTime="2026-01-28 16:57:41.874855938 +0000 UTC m=+1365.433182826" Jan 28 16:57:41 crc kubenswrapper[4877]: I0128 16:57:41.897830 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ggcj2" podStartSLOduration=2.788076041 podStartE2EDuration="1m1.897808073s" podCreationTimestamp="2026-01-28 16:56:40 +0000 UTC" firstStartedPulling="2026-01-28 16:56:41.309777193 +0000 UTC m=+1304.868104081" lastFinishedPulling="2026-01-28 16:57:40.419509225 +0000 UTC m=+1363.977836113" observedRunningTime="2026-01-28 
16:57:41.896687883 +0000 UTC m=+1365.455014771" watchObservedRunningTime="2026-01-28 16:57:41.897808073 +0000 UTC m=+1365.456134981" Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.025733 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=57.373759334 podStartE2EDuration="1m0.025695597s" podCreationTimestamp="2026-01-28 16:56:42 +0000 UTC" firstStartedPulling="2026-01-28 16:57:13.814571423 +0000 UTC m=+1337.372898311" lastFinishedPulling="2026-01-28 16:57:16.466507676 +0000 UTC m=+1340.024834574" observedRunningTime="2026-01-28 16:57:42.003685408 +0000 UTC m=+1365.562012296" watchObservedRunningTime="2026-01-28 16:57:42.025695597 +0000 UTC m=+1365.584022485" Jan 28 16:57:42 crc kubenswrapper[4877]: E0128 16:57:42.482747 4877 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Jan 28 16:57:42 crc kubenswrapper[4877]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Jan 28 16:57:42 crc kubenswrapper[4877]: > podSandboxID="946f1c46593991addee26ba37d7d7bd35b507717226d8c269a0515933992a24f" Jan 28 16:57:42 crc kubenswrapper[4877]: E0128 16:57:42.483147 4877 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 28 16:57:42 crc kubenswrapper[4877]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n599h5cbh7ch5d4h66fh676hdbh546h95h88h5ffh55ch7fhch57ch687hddhc7h5fdh57dh674h56fh64ch98h9bh557h55dh646h54ch54fh5c4h597q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-nb,SubPath:ovsdbserver-nb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-sb,SubPath:ovsdbserver-sb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qhlnx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 
5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-86db49b7ff-g7gld_openstack(798f6b4e-5b70-4d96-93bc-a6ddbae18eaa): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Jan 28 16:57:42 crc kubenswrapper[4877]: > logger="UnhandledError" Jan 28 16:57:42 crc kubenswrapper[4877]: E0128 16:57:42.484400 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" podUID="798f6b4e-5b70-4d96-93bc-a6ddbae18eaa" Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.526805 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.621991 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lgp55\" (UniqueName: \"kubernetes.io/projected/3bf40370-b176-4eae-bab1-9f55f99132ce-kube-api-access-lgp55\") pod \"3bf40370-b176-4eae-bab1-9f55f99132ce\" (UID: \"3bf40370-b176-4eae-bab1-9f55f99132ce\") " Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.623135 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-ovsdbserver-nb\") pod \"3bf40370-b176-4eae-bab1-9f55f99132ce\" (UID: \"3bf40370-b176-4eae-bab1-9f55f99132ce\") " Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.623404 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-config\") pod \"3bf40370-b176-4eae-bab1-9f55f99132ce\" (UID: \"3bf40370-b176-4eae-bab1-9f55f99132ce\") " Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.623554 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-dns-svc\") pod \"3bf40370-b176-4eae-bab1-9f55f99132ce\" (UID: \"3bf40370-b176-4eae-bab1-9f55f99132ce\") " Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.632926 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bf40370-b176-4eae-bab1-9f55f99132ce-kube-api-access-lgp55" (OuterVolumeSpecName: "kube-api-access-lgp55") pod "3bf40370-b176-4eae-bab1-9f55f99132ce" (UID: "3bf40370-b176-4eae-bab1-9f55f99132ce"). InnerVolumeSpecName "kube-api-access-lgp55". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.662794 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3bf40370-b176-4eae-bab1-9f55f99132ce" (UID: "3bf40370-b176-4eae-bab1-9f55f99132ce"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.728035 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lgp55\" (UniqueName: \"kubernetes.io/projected/3bf40370-b176-4eae-bab1-9f55f99132ce-kube-api-access-lgp55\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.728082 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.825500 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.866572 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" event={"ID":"3bf40370-b176-4eae-bab1-9f55f99132ce","Type":"ContainerDied","Data":"add725033abd8bfba774a07490a384e207d84934fa83911025545af94787f58b"} Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.866628 4877 scope.go:117] "RemoveContainer" containerID="941825cc5e05cd43d1b67d5219626644b19c1276c27ef476ac16114c25464a91" Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.866748 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-nrkf6" Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.870912 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"48287337-b9e0-4ad0-8db3-00cf201ca4cf","Type":"ContainerStarted","Data":"54e9b96d6b59b37ac6805613e644727bc1cab9e4ffde416a2f29e9b4ee1de8ab"} Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.875705 4877 generic.go:334] "Generic (PLEG): container finished" podID="88801c74-cfbd-4eee-936b-2899b69196aa" containerID="b1c85c55edc6ef969b940e4a77ab042c02a5f3e415e516c9b2407efd48050836" exitCode=0 Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.876788 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2nqdn" event={"ID":"88801c74-cfbd-4eee-936b-2899b69196aa","Type":"ContainerDied","Data":"b1c85c55edc6ef969b940e4a77ab042c02a5f3e415e516c9b2407efd48050836"} Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.916435 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=45.641816337 podStartE2EDuration="1m4.9164059s" podCreationTimestamp="2026-01-28 16:56:38 +0000 UTC" firstStartedPulling="2026-01-28 16:56:54.541049569 +0000 UTC m=+1318.099376497" lastFinishedPulling="2026-01-28 16:57:13.815639172 +0000 UTC m=+1337.373966060" observedRunningTime="2026-01-28 16:57:42.903427022 +0000 UTC m=+1366.461753920" watchObservedRunningTime="2026-01-28 16:57:42.9164059 +0000 UTC m=+1366.474732788" Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.966712 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-config" (OuterVolumeSpecName: "config") pod "3bf40370-b176-4eae-bab1-9f55f99132ce" (UID: "3bf40370-b176-4eae-bab1-9f55f99132ce"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:42 crc kubenswrapper[4877]: I0128 16:57:42.990701 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 28 16:57:43 crc kubenswrapper[4877]: I0128 16:57:43.037254 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:43 crc kubenswrapper[4877]: I0128 16:57:43.262899 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3bf40370-b176-4eae-bab1-9f55f99132ce" (UID: "3bf40370-b176-4eae-bab1-9f55f99132ce"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:43 crc kubenswrapper[4877]: I0128 16:57:43.345105 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3bf40370-b176-4eae-bab1-9f55f99132ce-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:43 crc kubenswrapper[4877]: I0128 16:57:43.599820 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-nrkf6"] Jan 28 16:57:43 crc kubenswrapper[4877]: I0128 16:57:43.641649 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-nrkf6"] Jan 28 16:57:43 crc kubenswrapper[4877]: I0128 16:57:43.708854 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 28 16:57:43 crc kubenswrapper[4877]: I0128 16:57:43.708921 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 28 16:57:43 crc kubenswrapper[4877]: I0128 16:57:43.911906 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 28 16:57:43 crc kubenswrapper[4877]: I0128 16:57:43.931259 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 28 16:57:43 crc kubenswrapper[4877]: I0128 16:57:43.961404 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:57:43 crc kubenswrapper[4877]: E0128 16:57:43.961711 4877 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 16:57:43 crc kubenswrapper[4877]: E0128 16:57:43.961758 4877 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 16:57:43 crc kubenswrapper[4877]: E0128 16:57:43.961844 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift podName:03f31376-451f-4360-bea2-4d4e557568f0 nodeName:}" failed. No retries permitted until 2026-01-28 16:57:59.961814096 +0000 UTC m=+1383.520140984 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift") pod "swift-storage-0" (UID: "03f31376-451f-4360-bea2-4d4e557568f0") : configmap "swift-ring-files" not found Jan 28 16:57:43 crc kubenswrapper[4877]: I0128 16:57:43.995092 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 28 16:57:43 crc kubenswrapper[4877]: I0128 16:57:43.998822 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.301872 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 28 16:57:44 crc kubenswrapper[4877]: E0128 16:57:44.302496 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bf40370-b176-4eae-bab1-9f55f99132ce" containerName="init" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.302522 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bf40370-b176-4eae-bab1-9f55f99132ce" containerName="init" Jan 28 16:57:44 crc kubenswrapper[4877]: E0128 16:57:44.302546 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="477ae0b4-6cca-42b1-b0fd-6e3bba2a3343" containerName="init" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.302555 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="477ae0b4-6cca-42b1-b0fd-6e3bba2a3343" containerName="init" Jan 28 16:57:44 crc kubenswrapper[4877]: E0128 16:57:44.302565 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="477ae0b4-6cca-42b1-b0fd-6e3bba2a3343" containerName="dnsmasq-dns" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.302574 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="477ae0b4-6cca-42b1-b0fd-6e3bba2a3343" containerName="dnsmasq-dns" Jan 28 16:57:44 crc kubenswrapper[4877]: E0128 16:57:44.302595 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94736b29-94ba-4356-8b0c-439047f52fdc" containerName="init" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.302603 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="94736b29-94ba-4356-8b0c-439047f52fdc" containerName="init" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.302853 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="94736b29-94ba-4356-8b0c-439047f52fdc" containerName="init" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.302881 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3bf40370-b176-4eae-bab1-9f55f99132ce" containerName="init" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.302904 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="477ae0b4-6cca-42b1-b0fd-6e3bba2a3343" containerName="dnsmasq-dns" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.304640 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.307665 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.307761 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.307665 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.316863 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.329852 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-6k82q" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.481061 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/677f91cd-9b29-4a73-8b8c-598e5029fa7e-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.481149 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h29zf\" (UniqueName: \"kubernetes.io/projected/677f91cd-9b29-4a73-8b8c-598e5029fa7e-kube-api-access-h29zf\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.481469 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/677f91cd-9b29-4a73-8b8c-598e5029fa7e-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.481616 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/677f91cd-9b29-4a73-8b8c-598e5029fa7e-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.481648 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/677f91cd-9b29-4a73-8b8c-598e5029fa7e-scripts\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.481663 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/677f91cd-9b29-4a73-8b8c-598e5029fa7e-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.482190 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/677f91cd-9b29-4a73-8b8c-598e5029fa7e-config\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: 
I0128 16:57:44.585040 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/677f91cd-9b29-4a73-8b8c-598e5029fa7e-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.585111 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/677f91cd-9b29-4a73-8b8c-598e5029fa7e-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.585137 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/677f91cd-9b29-4a73-8b8c-598e5029fa7e-scripts\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.585159 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/677f91cd-9b29-4a73-8b8c-598e5029fa7e-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.585276 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/677f91cd-9b29-4a73-8b8c-598e5029fa7e-config\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.585321 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/677f91cd-9b29-4a73-8b8c-598e5029fa7e-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.585367 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h29zf\" (UniqueName: \"kubernetes.io/projected/677f91cd-9b29-4a73-8b8c-598e5029fa7e-kube-api-access-h29zf\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.586356 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/677f91cd-9b29-4a73-8b8c-598e5029fa7e-scripts\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.586367 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/677f91cd-9b29-4a73-8b8c-598e5029fa7e-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.586669 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/677f91cd-9b29-4a73-8b8c-598e5029fa7e-config\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.595649 4877 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/677f91cd-9b29-4a73-8b8c-598e5029fa7e-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.602268 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/677f91cd-9b29-4a73-8b8c-598e5029fa7e-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.602695 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/677f91cd-9b29-4a73-8b8c-598e5029fa7e-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.604321 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h29zf\" (UniqueName: \"kubernetes.io/projected/677f91cd-9b29-4a73-8b8c-598e5029fa7e-kube-api-access-h29zf\") pod \"ovn-northd-0\" (UID: \"677f91cd-9b29-4a73-8b8c-598e5029fa7e\") " pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.647396 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.959691 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" event={"ID":"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa","Type":"ContainerStarted","Data":"3b80ff6004d25625cee197d1a4284bb7158695ccdc578091065cb3355d16fb0d"} Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.960157 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.972036 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"36256bdd-5ada-4651-8944-ed4c8978ed2c","Type":"ContainerStarted","Data":"667938be49db0d327863df1e58495f8cc87ed7b4d96182a54a06a44874fcd16b"} Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.978801 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2nqdn" event={"ID":"88801c74-cfbd-4eee-936b-2899b69196aa","Type":"ContainerStarted","Data":"dc5d6c2b8dbc826f1d1cae192b2f9f09346b7c6d555dec08b31640ac0a78e616"} Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.978848 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:44 crc kubenswrapper[4877]: I0128 16:57:44.994726 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" podStartSLOduration=20.994702806 podStartE2EDuration="20.994702806s" podCreationTimestamp="2026-01-28 16:57:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:57:44.988708015 +0000 UTC m=+1368.547034913" watchObservedRunningTime="2026-01-28 16:57:44.994702806 +0000 UTC m=+1368.553029694" Jan 28 16:57:45 crc kubenswrapper[4877]: I0128 16:57:45.019374 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/dnsmasq-dns-698758b865-2nqdn" podStartSLOduration=19.019348345 podStartE2EDuration="19.019348345s" podCreationTimestamp="2026-01-28 16:57:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:57:45.012150963 +0000 UTC m=+1368.570477861" watchObservedRunningTime="2026-01-28 16:57:45.019348345 +0000 UTC m=+1368.577675233" Jan 28 16:57:45 crc kubenswrapper[4877]: I0128 16:57:45.343222 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3bf40370-b176-4eae-bab1-9f55f99132ce" path="/var/lib/kubelet/pods/3bf40370-b176-4eae-bab1-9f55f99132ce/volumes" Jan 28 16:57:46 crc kubenswrapper[4877]: I0128 16:57:46.996367 4877 generic.go:334] "Generic (PLEG): container finished" podID="6cf870d4-330c-490b-8fcc-77028d084de4" containerID="4cd2ccacff1d57aa9c2c97b5c1b2e292ea115ad3b38c226993097be4c2d99aa0" exitCode=0 Jan 28 16:57:46 crc kubenswrapper[4877]: I0128 16:57:46.996437 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"6cf870d4-330c-490b-8fcc-77028d084de4","Type":"ContainerDied","Data":"4cd2ccacff1d57aa9c2c97b5c1b2e292ea115ad3b38c226993097be4c2d99aa0"} Jan 28 16:57:46 crc kubenswrapper[4877]: I0128 16:57:46.999947 4877 generic.go:334] "Generic (PLEG): container finished" podID="5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c" containerID="344654039e6da001213d4d8b96780f76b0a56f7fdd933e538efad2f66f0c421c" exitCode=0 Jan 28 16:57:47 crc kubenswrapper[4877]: I0128 16:57:47.000049 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c","Type":"ContainerDied","Data":"344654039e6da001213d4d8b96780f76b0a56f7fdd933e538efad2f66f0c421c"} Jan 28 16:57:47 crc kubenswrapper[4877]: I0128 16:57:47.146209 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 28 16:57:48 crc kubenswrapper[4877]: I0128 16:57:48.031808 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c","Type":"ContainerStarted","Data":"93c1df4964bdc35d8071628b56d7fb231f1b5bd6b350e5064e4eee566a8433af"} Jan 28 16:57:48 crc kubenswrapper[4877]: I0128 16:57:48.107031 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"677f91cd-9b29-4a73-8b8c-598e5029fa7e","Type":"ContainerStarted","Data":"88418db7e07e12c386901841fb306d35d75f8ed037e3d6d6125824bd7295eb9c"} Jan 28 16:57:48 crc kubenswrapper[4877]: I0128 16:57:48.110495 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-sp2zz" event={"ID":"f8872319-aeb2-4f15-bfc5-3e9abd62770e","Type":"ContainerStarted","Data":"8959a1835ff54e5295927b5f85d9061f6a9a20238e82275c21b10eab840d361c"} Jan 28 16:57:48 crc kubenswrapper[4877]: I0128 16:57:48.119851 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"6cf870d4-330c-490b-8fcc-77028d084de4","Type":"ContainerStarted","Data":"cc3e75950cda499aa563bbf84f763f05b9b143b4b68aa175824f43392e681d61"} Jan 28 16:57:48 crc kubenswrapper[4877]: I0128 16:57:48.152762 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=10.195268446 podStartE2EDuration="1m16.151943115s" podCreationTimestamp="2026-01-28 16:56:32 +0000 UTC" firstStartedPulling="2026-01-28 16:56:34.369764834 +0000 UTC 
m=+1297.928091722" lastFinishedPulling="2026-01-28 16:57:40.326439503 +0000 UTC m=+1363.884766391" observedRunningTime="2026-01-28 16:57:48.120553904 +0000 UTC m=+1371.678880812" watchObservedRunningTime="2026-01-28 16:57:48.151943115 +0000 UTC m=+1371.710270023" Jan 28 16:57:48 crc kubenswrapper[4877]: I0128 16:57:48.163061 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-sp2zz" podStartSLOduration=11.587796951 podStartE2EDuration="17.163014302s" podCreationTimestamp="2026-01-28 16:57:31 +0000 UTC" firstStartedPulling="2026-01-28 16:57:41.165862452 +0000 UTC m=+1364.724189340" lastFinishedPulling="2026-01-28 16:57:46.741079803 +0000 UTC m=+1370.299406691" observedRunningTime="2026-01-28 16:57:48.149057398 +0000 UTC m=+1371.707384456" watchObservedRunningTime="2026-01-28 16:57:48.163014302 +0000 UTC m=+1371.721341180" Jan 28 16:57:48 crc kubenswrapper[4877]: I0128 16:57:48.180141 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=10.506445532 podStartE2EDuration="1m18.180081678s" podCreationTimestamp="2026-01-28 16:56:30 +0000 UTC" firstStartedPulling="2026-01-28 16:56:32.945068873 +0000 UTC m=+1296.503395761" lastFinishedPulling="2026-01-28 16:57:40.618705019 +0000 UTC m=+1364.177031907" observedRunningTime="2026-01-28 16:57:48.175607159 +0000 UTC m=+1371.733934067" watchObservedRunningTime="2026-01-28 16:57:48.180081678 +0000 UTC m=+1371.738408566" Jan 28 16:57:49 crc kubenswrapper[4877]: I0128 16:57:49.131304 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"677f91cd-9b29-4a73-8b8c-598e5029fa7e","Type":"ContainerStarted","Data":"dd843aa0f52cc9166c4eda88ad92c4a575b40fcde6810cac0e69f3e6056aa37c"} Jan 28 16:57:50 crc kubenswrapper[4877]: I0128 16:57:50.142704 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"677f91cd-9b29-4a73-8b8c-598e5029fa7e","Type":"ContainerStarted","Data":"6a8679f30bfa22fbc85f07f64c5d27c1fef9a63a2bf40d7dce957dd850a378c3"} Jan 28 16:57:50 crc kubenswrapper[4877]: I0128 16:57:50.143207 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 28 16:57:50 crc kubenswrapper[4877]: I0128 16:57:50.144373 4877 generic.go:334] "Generic (PLEG): container finished" podID="d96b5016-3ed4-4f98-8708-f69092894981" containerID="52942a240b967e15d227027e6b2bf1fa3d2ce227a325fd22c64b791144cfc420" exitCode=0 Jan 28 16:57:50 crc kubenswrapper[4877]: I0128 16:57:50.144433 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"d96b5016-3ed4-4f98-8708-f69092894981","Type":"ContainerDied","Data":"52942a240b967e15d227027e6b2bf1fa3d2ce227a325fd22c64b791144cfc420"} Jan 28 16:57:50 crc kubenswrapper[4877]: I0128 16:57:50.146804 4877 generic.go:334] "Generic (PLEG): container finished" podID="2f642a61-430e-4dfc-b6b6-3ee68161eaf6" containerID="c54362878b34558cb31609a92d4a2216db79c8bb56753c66aa44c767d855a310" exitCode=0 Jan 28 16:57:50 crc kubenswrapper[4877]: I0128 16:57:50.146837 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"2f642a61-430e-4dfc-b6b6-3ee68161eaf6","Type":"ContainerDied","Data":"c54362878b34558cb31609a92d4a2216db79c8bb56753c66aa44c767d855a310"} Jan 28 16:57:50 crc kubenswrapper[4877]: I0128 16:57:50.195720 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" 
Jan 28 16:57:50 crc kubenswrapper[4877]: I0128 16:57:50.195720 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=4.800156932 podStartE2EDuration="6.195687774s" podCreationTimestamp="2026-01-28 16:57:44 +0000 UTC" firstStartedPulling="2026-01-28 16:57:47.169862165 +0000 UTC m=+1370.728189053" lastFinishedPulling="2026-01-28 16:57:48.565393007 +0000 UTC m=+1372.123719895" observedRunningTime="2026-01-28 16:57:50.177001864 +0000 UTC m=+1373.735328752" watchObservedRunningTime="2026-01-28 16:57:50.195687774 +0000 UTC m=+1373.754014692"
Jan 28 16:57:50 crc kubenswrapper[4877]: I0128 16:57:50.361317 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-g7gld"
Jan 28 16:57:50 crc kubenswrapper[4877]: I0128 16:57:50.713233 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-vlhkh"
Jan 28 16:57:50 crc kubenswrapper[4877]: I0128 16:57:50.754647 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-vlhkh"
Jan 28 16:57:50 crc kubenswrapper[4877]: I0128 16:57:50.963863 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ggcj2-config-967nw"]
Jan 28 16:57:50 crc kubenswrapper[4877]: I0128 16:57:50.965231 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ggcj2-config-967nw"
Jan 28 16:57:50 crc kubenswrapper[4877]: I0128 16:57:50.967789 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Jan 28 16:57:50 crc kubenswrapper[4877]: I0128 16:57:50.987888 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ggcj2-config-967nw"]
Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.009546 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-run-ovn\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw"
Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.010743 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-scripts\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw"
Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.010860 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-log-ovn\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw"
Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.011026 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-additional-scripts\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw"
Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.011161 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9x4p2\" (UniqueName: 
\"kubernetes.io/projected/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-kube-api-access-9x4p2\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.011264 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-run\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.113549 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9x4p2\" (UniqueName: \"kubernetes.io/projected/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-kube-api-access-9x4p2\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.113652 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-run\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.113699 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-run-ovn\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.113725 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-scripts\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.113776 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-log-ovn\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.113871 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-additional-scripts\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.114329 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-run-ovn\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.114406 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" 
(UniqueName: \"kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-log-ovn\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.114351 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-run\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.114714 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-additional-scripts\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.116405 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-scripts\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.133229 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9x4p2\" (UniqueName: \"kubernetes.io/projected/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-kube-api-access-9x4p2\") pod \"ovn-controller-ggcj2-config-967nw\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.162400 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"d96b5016-3ed4-4f98-8708-f69092894981","Type":"ContainerStarted","Data":"f5725047d30bdfc1c6d9311e0bc73bc548184731c937882127b2785a7410e9fc"} Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.165214 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"2f642a61-430e-4dfc-b6b6-3ee68161eaf6","Type":"ContainerStarted","Data":"ed817e61aa6dcba55287d14e0f9a4cb6e5ab9f3012181d75a559e09e65498906"} Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.282025 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:51 crc kubenswrapper[4877]: I0128 16:57:51.871430 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ggcj2-config-967nw"] Jan 28 16:57:52 crc kubenswrapper[4877]: I0128 16:57:52.179029 4877 generic.go:334] "Generic (PLEG): container finished" podID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerID="667938be49db0d327863df1e58495f8cc87ed7b4d96182a54a06a44874fcd16b" exitCode=0 Jan 28 16:57:52 crc kubenswrapper[4877]: I0128 16:57:52.179114 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"36256bdd-5ada-4651-8944-ed4c8978ed2c","Type":"ContainerDied","Data":"667938be49db0d327863df1e58495f8cc87ed7b4d96182a54a06a44874fcd16b"} Jan 28 16:57:52 crc kubenswrapper[4877]: I0128 16:57:52.182207 4877 generic.go:334] "Generic (PLEG): container finished" podID="951f6a86-2dbc-402b-bb10-9a16d347c697" containerID="4814d6a51261f73fb6d1a6d3f616147b0069871f1e5588245254e80036c4c35d" exitCode=0 Jan 28 16:57:52 crc kubenswrapper[4877]: I0128 16:57:52.182300 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"951f6a86-2dbc-402b-bb10-9a16d347c697","Type":"ContainerDied","Data":"4814d6a51261f73fb6d1a6d3f616147b0069871f1e5588245254e80036c4c35d"} Jan 28 16:57:52 crc kubenswrapper[4877]: I0128 16:57:52.186117 4877 generic.go:334] "Generic (PLEG): container finished" podID="5d261b3a-c6f9-48bd-92de-b76d3821e778" containerID="8e0fb86256a77a22a05970915add645a2a85e79bc329b01962f4b68e24315021" exitCode=0 Jan 28 16:57:52 crc kubenswrapper[4877]: I0128 16:57:52.186262 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5d261b3a-c6f9-48bd-92de-b76d3821e778","Type":"ContainerDied","Data":"8e0fb86256a77a22a05970915add645a2a85e79bc329b01962f4b68e24315021"} Jan 28 16:57:52 crc kubenswrapper[4877]: I0128 16:57:52.191305 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-2nqdn" Jan 28 16:57:52 crc kubenswrapper[4877]: I0128 16:57:52.212146 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ggcj2-config-967nw" event={"ID":"7f80af7e-9055-4cf7-a345-00fb9bef9e7f","Type":"ContainerStarted","Data":"eba3d768b256000014c86d9a232cb62d8d3d6a4717f2cc98c582a5002e60ef3f"} Jan 28 16:57:52 crc kubenswrapper[4877]: I0128 16:57:52.212799 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-2" Jan 28 16:57:52 crc kubenswrapper[4877]: I0128 16:57:52.213268 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-1" Jan 28 16:57:52 crc kubenswrapper[4877]: I0128 16:57:52.340742 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 28 16:57:52 crc kubenswrapper[4877]: I0128 16:57:52.342707 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 28 16:57:52 crc kubenswrapper[4877]: I0128 16:57:52.381025 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-1" podStartSLOduration=40.708795597 podStartE2EDuration="1m23.381002656s" podCreationTimestamp="2026-01-28 16:56:29 +0000 UTC" firstStartedPulling="2026-01-28 16:56:31.581699176 +0000 UTC m=+1295.140026064" lastFinishedPulling="2026-01-28 16:57:14.253906235 +0000 UTC 
m=+1337.812233123" observedRunningTime="2026-01-28 16:57:52.376577488 +0000 UTC m=+1375.934904376" watchObservedRunningTime="2026-01-28 16:57:52.381002656 +0000 UTC m=+1375.939329544" Jan 28 16:57:52 crc kubenswrapper[4877]: I0128 16:57:52.489849 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-g7gld"] Jan 28 16:57:52 crc kubenswrapper[4877]: I0128 16:57:52.490114 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" podUID="798f6b4e-5b70-4d96-93bc-a6ddbae18eaa" containerName="dnsmasq-dns" containerID="cri-o://3b80ff6004d25625cee197d1a4284bb7158695ccdc578091065cb3355d16fb0d" gracePeriod=10 Jan 28 16:57:52 crc kubenswrapper[4877]: I0128 16:57:52.499351 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-2" podStartSLOduration=39.997572406 podStartE2EDuration="1m23.499329195s" podCreationTimestamp="2026-01-28 16:56:29 +0000 UTC" firstStartedPulling="2026-01-28 16:56:31.58262914 +0000 UTC m=+1295.140956028" lastFinishedPulling="2026-01-28 16:57:15.084385939 +0000 UTC m=+1338.642712817" observedRunningTime="2026-01-28 16:57:52.46143381 +0000 UTC m=+1376.019760718" watchObservedRunningTime="2026-01-28 16:57:52.499329195 +0000 UTC m=+1376.057656083" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.226104 4877 generic.go:334] "Generic (PLEG): container finished" podID="798f6b4e-5b70-4d96-93bc-a6ddbae18eaa" containerID="3b80ff6004d25625cee197d1a4284bb7158695ccdc578091065cb3355d16fb0d" exitCode=0 Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.226247 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" event={"ID":"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa","Type":"ContainerDied","Data":"3b80ff6004d25625cee197d1a4284bb7158695ccdc578091065cb3355d16fb0d"} Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.227641 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" event={"ID":"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa","Type":"ContainerDied","Data":"946f1c46593991addee26ba37d7d7bd35b507717226d8c269a0515933992a24f"} Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.227706 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="946f1c46593991addee26ba37d7d7bd35b507717226d8c269a0515933992a24f" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.236304 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"951f6a86-2dbc-402b-bb10-9a16d347c697","Type":"ContainerStarted","Data":"8ef2c662e9cfafd69c72e1ddd8c5e00cc88fd087a6b6090f6f2bd73fa71f67ce"} Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.237772 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.240642 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5d261b3a-c6f9-48bd-92de-b76d3821e778","Type":"ContainerStarted","Data":"793925a2d2bcdc32855957ec2a7488bda261e25aa749028fb609ae0ca451af05"} Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.241037 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.242645 4877 generic.go:334] "Generic (PLEG): container finished" podID="7f80af7e-9055-4cf7-a345-00fb9bef9e7f" 
containerID="16783a021d29afc65113e8ae157e172c931d0449e874d38f664cdfd0e5518531" exitCode=0 Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.242775 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ggcj2-config-967nw" event={"ID":"7f80af7e-9055-4cf7-a345-00fb9bef9e7f","Type":"ContainerDied","Data":"16783a021d29afc65113e8ae157e172c931d0449e874d38f664cdfd0e5518531"} Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.313547 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=-9223371952.541256 podStartE2EDuration="1m24.313519298s" podCreationTimestamp="2026-01-28 16:56:29 +0000 UTC" firstStartedPulling="2026-01-28 16:56:31.844971562 +0000 UTC m=+1295.403298450" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:57:53.291274812 +0000 UTC m=+1376.849601710" watchObservedRunningTime="2026-01-28 16:57:53.313519298 +0000 UTC m=+1376.871846186" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.321902 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.347848 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=-9223371952.506945 podStartE2EDuration="1m24.347830747s" podCreationTimestamp="2026-01-28 16:56:29 +0000 UTC" firstStartedPulling="2026-01-28 16:56:31.897807638 +0000 UTC m=+1295.456134526" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:57:53.332505087 +0000 UTC m=+1376.890831975" watchObservedRunningTime="2026-01-28 16:57:53.347830747 +0000 UTC m=+1376.906157635" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.395677 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-ovsdbserver-nb\") pod \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.395759 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-config\") pod \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.395853 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qhlnx\" (UniqueName: \"kubernetes.io/projected/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-kube-api-access-qhlnx\") pod \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.395874 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-ovsdbserver-sb\") pod \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.395898 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-dns-svc\") pod \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " Jan 
Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.414942 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-kube-api-access-qhlnx" (OuterVolumeSpecName: "kube-api-access-qhlnx") pod "798f6b4e-5b70-4d96-93bc-a6ddbae18eaa" (UID: "798f6b4e-5b70-4d96-93bc-a6ddbae18eaa"). InnerVolumeSpecName "kube-api-access-qhlnx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.477122 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "798f6b4e-5b70-4d96-93bc-a6ddbae18eaa" (UID: "798f6b4e-5b70-4d96-93bc-a6ddbae18eaa"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.498844 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "798f6b4e-5b70-4d96-93bc-a6ddbae18eaa" (UID: "798f6b4e-5b70-4d96-93bc-a6ddbae18eaa"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.510222 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-ovsdbserver-sb\") pod \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") "
Jan 28 16:57:53 crc kubenswrapper[4877]: W0128 16:57:53.513210 4877 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa/volumes/kubernetes.io~configmap/ovsdbserver-sb
Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.513248 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "798f6b4e-5b70-4d96-93bc-a6ddbae18eaa" (UID: "798f6b4e-5b70-4d96-93bc-a6ddbae18eaa"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
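Teardown mirrors setup in reverse: "UnmountVolume started" (reconciler_common.go:159) → "UnmountVolume.TearDown succeeded" (operation_generator.go:803) → "Volume detached" (reconciler_common.go:293). The W-severity empty_dir.go:500 "Unmount skipped because path does not exist" entries are the benign double-teardown case, where the reconciler retries a configmap volume whose directory the first TearDown already removed. For triage of a log like this, the W/E/F-severity lines are what matter; a small stdin filter for this klog text format (the header regex is illustrative):

```go
// Sketch: keep only warning/error/fatal klog entries (header letter W/E/F)
// from a kubelet log on stdin; the I lines around them are routine.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// klog header: severity letter + MMDD hh:mm:ss.micros, e.g. "W0128 16:57:53.513210".
var sev = regexp.MustCompile(`(?:^|: )([WEF])\d{4} \d{2}:\d{2}:\d{2}\.\d{6} `)

func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<20)
	for sc.Scan() {
		if sev.MatchString(sc.Text()) {
			fmt.Println(sc.Text())
		}
	}
}
```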
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.519714 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-config\") pod \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\" (UID: \"798f6b4e-5b70-4d96-93bc-a6ddbae18eaa\") " Jan 28 16:57:53 crc kubenswrapper[4877]: W0128 16:57:53.520776 4877 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa/volumes/kubernetes.io~configmap/config Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.520797 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-config" (OuterVolumeSpecName: "config") pod "798f6b4e-5b70-4d96-93bc-a6ddbae18eaa" (UID: "798f6b4e-5b70-4d96-93bc-a6ddbae18eaa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.530350 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-config\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.530415 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qhlnx\" (UniqueName: \"kubernetes.io/projected/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-kube-api-access-qhlnx\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.530436 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.530454 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.555175 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.555216 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.602772 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "798f6b4e-5b70-4d96-93bc-a6ddbae18eaa" (UID: "798f6b4e-5b70-4d96-93bc-a6ddbae18eaa"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.632224 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:53 crc kubenswrapper[4877]: I0128 16:57:53.705524 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.258925 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-g7gld" Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.296209 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-g7gld"] Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.318439 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-g7gld"] Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.371891 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.880567 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.968644 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-scripts\") pod \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.968794 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-run-ovn\") pod \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.968879 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-run\") pod \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.968927 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-log-ovn\") pod \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.969326 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-additional-scripts\") pod \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.969422 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9x4p2\" (UniqueName: \"kubernetes.io/projected/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-kube-api-access-9x4p2\") pod \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\" (UID: \"7f80af7e-9055-4cf7-a345-00fb9bef9e7f\") " Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.969616 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-run" (OuterVolumeSpecName: "var-run") pod "7f80af7e-9055-4cf7-a345-00fb9bef9e7f" (UID: "7f80af7e-9055-4cf7-a345-00fb9bef9e7f"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.969721 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "7f80af7e-9055-4cf7-a345-00fb9bef9e7f" (UID: "7f80af7e-9055-4cf7-a345-00fb9bef9e7f"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.969729 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "7f80af7e-9055-4cf7-a345-00fb9bef9e7f" (UID: "7f80af7e-9055-4cf7-a345-00fb9bef9e7f"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.970023 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-scripts" (OuterVolumeSpecName: "scripts") pod "7f80af7e-9055-4cf7-a345-00fb9bef9e7f" (UID: "7f80af7e-9055-4cf7-a345-00fb9bef9e7f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.970751 4877 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-run\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.970779 4877 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.970789 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.970802 4877 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.971022 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "7f80af7e-9055-4cf7-a345-00fb9bef9e7f" (UID: "7f80af7e-9055-4cf7-a345-00fb9bef9e7f"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:57:54 crc kubenswrapper[4877]: I0128 16:57:54.977670 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-kube-api-access-9x4p2" (OuterVolumeSpecName: "kube-api-access-9x4p2") pod "7f80af7e-9055-4cf7-a345-00fb9bef9e7f" (UID: "7f80af7e-9055-4cf7-a345-00fb9bef9e7f"). InnerVolumeSpecName "kube-api-access-9x4p2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:57:55 crc kubenswrapper[4877]: I0128 16:57:55.077246 4877 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:55 crc kubenswrapper[4877]: I0128 16:57:55.077308 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9x4p2\" (UniqueName: \"kubernetes.io/projected/7f80af7e-9055-4cf7-a345-00fb9bef9e7f-kube-api-access-9x4p2\") on node \"crc\" DevicePath \"\"" Jan 28 16:57:55 crc kubenswrapper[4877]: I0128 16:57:55.271842 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ggcj2-config-967nw" Jan 28 16:57:55 crc kubenswrapper[4877]: I0128 16:57:55.276020 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ggcj2-config-967nw" event={"ID":"7f80af7e-9055-4cf7-a345-00fb9bef9e7f","Type":"ContainerDied","Data":"eba3d768b256000014c86d9a232cb62d8d3d6a4717f2cc98c582a5002e60ef3f"} Jan 28 16:57:55 crc kubenswrapper[4877]: I0128 16:57:55.276071 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eba3d768b256000014c86d9a232cb62d8d3d6a4717f2cc98c582a5002e60ef3f" Jan 28 16:57:55 crc kubenswrapper[4877]: I0128 16:57:55.346026 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="798f6b4e-5b70-4d96-93bc-a6ddbae18eaa" path="/var/lib/kubelet/pods/798f6b4e-5b70-4d96-93bc-a6ddbae18eaa/volumes" Jan 28 16:57:55 crc kubenswrapper[4877]: I0128 16:57:55.355930 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 28 16:57:55 crc kubenswrapper[4877]: I0128 16:57:55.735043 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.086090 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ggcj2-config-967nw"] Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.103908 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ggcj2-config-967nw"] Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.219345 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-6m8l9"] Jan 28 16:57:56 crc kubenswrapper[4877]: E0128 16:57:56.219741 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="798f6b4e-5b70-4d96-93bc-a6ddbae18eaa" containerName="dnsmasq-dns" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.219759 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="798f6b4e-5b70-4d96-93bc-a6ddbae18eaa" containerName="dnsmasq-dns" Jan 28 16:57:56 crc kubenswrapper[4877]: E0128 16:57:56.219773 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f80af7e-9055-4cf7-a345-00fb9bef9e7f" containerName="ovn-config" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.219781 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f80af7e-9055-4cf7-a345-00fb9bef9e7f" containerName="ovn-config" Jan 28 16:57:56 crc kubenswrapper[4877]: E0128 16:57:56.219813 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="798f6b4e-5b70-4d96-93bc-a6ddbae18eaa" containerName="init" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.219820 4877 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="798f6b4e-5b70-4d96-93bc-a6ddbae18eaa" containerName="init" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.220023 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="798f6b4e-5b70-4d96-93bc-a6ddbae18eaa" containerName="dnsmasq-dns" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.220051 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f80af7e-9055-4cf7-a345-00fb9bef9e7f" containerName="ovn-config" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.220703 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-6m8l9" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.246520 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-6m8l9"] Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.416779 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39b89193-b00c-41ea-bf92-cc3aebce6a31-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-6m8l9\" (UID: \"39b89193-b00c-41ea-bf92-cc3aebce6a31\") " pod="openstack/mysqld-exporter-openstack-db-create-6m8l9" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.417572 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snqgz\" (UniqueName: \"kubernetes.io/projected/39b89193-b00c-41ea-bf92-cc3aebce6a31-kube-api-access-snqgz\") pod \"mysqld-exporter-openstack-db-create-6m8l9\" (UID: \"39b89193-b00c-41ea-bf92-cc3aebce6a31\") " pod="openstack/mysqld-exporter-openstack-db-create-6m8l9" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.450213 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-7395-account-create-update-2gfnc"] Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.451439 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-7395-account-create-update-2gfnc" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.454058 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-db-secret" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.473394 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-7395-account-create-update-2gfnc"] Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.523239 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snqgz\" (UniqueName: \"kubernetes.io/projected/39b89193-b00c-41ea-bf92-cc3aebce6a31-kube-api-access-snqgz\") pod \"mysqld-exporter-openstack-db-create-6m8l9\" (UID: \"39b89193-b00c-41ea-bf92-cc3aebce6a31\") " pod="openstack/mysqld-exporter-openstack-db-create-6m8l9" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.523753 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39b89193-b00c-41ea-bf92-cc3aebce6a31-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-6m8l9\" (UID: \"39b89193-b00c-41ea-bf92-cc3aebce6a31\") " pod="openstack/mysqld-exporter-openstack-db-create-6m8l9" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.525385 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39b89193-b00c-41ea-bf92-cc3aebce6a31-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-6m8l9\" (UID: \"39b89193-b00c-41ea-bf92-cc3aebce6a31\") " pod="openstack/mysqld-exporter-openstack-db-create-6m8l9" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.550671 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snqgz\" (UniqueName: \"kubernetes.io/projected/39b89193-b00c-41ea-bf92-cc3aebce6a31-kube-api-access-snqgz\") pod \"mysqld-exporter-openstack-db-create-6m8l9\" (UID: \"39b89193-b00c-41ea-bf92-cc3aebce6a31\") " pod="openstack/mysqld-exporter-openstack-db-create-6m8l9" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.626854 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbbdg\" (UniqueName: \"kubernetes.io/projected/b0f6fbde-4240-4911-9b0d-50740836e659-kube-api-access-tbbdg\") pod \"mysqld-exporter-7395-account-create-update-2gfnc\" (UID: \"b0f6fbde-4240-4911-9b0d-50740836e659\") " pod="openstack/mysqld-exporter-7395-account-create-update-2gfnc" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.627074 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0f6fbde-4240-4911-9b0d-50740836e659-operator-scripts\") pod \"mysqld-exporter-7395-account-create-update-2gfnc\" (UID: \"b0f6fbde-4240-4911-9b0d-50740836e659\") " pod="openstack/mysqld-exporter-7395-account-create-update-2gfnc" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.730017 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbbdg\" (UniqueName: \"kubernetes.io/projected/b0f6fbde-4240-4911-9b0d-50740836e659-kube-api-access-tbbdg\") pod \"mysqld-exporter-7395-account-create-update-2gfnc\" (UID: \"b0f6fbde-4240-4911-9b0d-50740836e659\") " pod="openstack/mysqld-exporter-7395-account-create-update-2gfnc" Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.731289 
4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0f6fbde-4240-4911-9b0d-50740836e659-operator-scripts\") pod \"mysqld-exporter-7395-account-create-update-2gfnc\" (UID: \"b0f6fbde-4240-4911-9b0d-50740836e659\") " pod="openstack/mysqld-exporter-7395-account-create-update-2gfnc"
Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.732434 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0f6fbde-4240-4911-9b0d-50740836e659-operator-scripts\") pod \"mysqld-exporter-7395-account-create-update-2gfnc\" (UID: \"b0f6fbde-4240-4911-9b0d-50740836e659\") " pod="openstack/mysqld-exporter-7395-account-create-update-2gfnc"
Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.751324 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbbdg\" (UniqueName: \"kubernetes.io/projected/b0f6fbde-4240-4911-9b0d-50740836e659-kube-api-access-tbbdg\") pod \"mysqld-exporter-7395-account-create-update-2gfnc\" (UID: \"b0f6fbde-4240-4911-9b0d-50740836e659\") " pod="openstack/mysqld-exporter-7395-account-create-update-2gfnc"
Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.770891 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-7395-account-create-update-2gfnc"
Jan 28 16:57:56 crc kubenswrapper[4877]: I0128 16:57:56.838007 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-6m8l9"
Jan 28 16:57:57 crc kubenswrapper[4877]: I0128 16:57:57.350630 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f80af7e-9055-4cf7-a345-00fb9bef9e7f" path="/var/lib/kubelet/pods/7f80af7e-9055-4cf7-a345-00fb9bef9e7f/volumes"
Jan 28 16:57:57 crc kubenswrapper[4877]: I0128 16:57:57.503298 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-6m8l9"]
Jan 28 16:57:57 crc kubenswrapper[4877]: I0128 16:57:57.528880 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-7395-account-create-update-2gfnc"]
Jan 28 16:57:58 crc kubenswrapper[4877]: W0128 16:57:58.025659 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb0f6fbde_4240_4911_9b0d_50740836e659.slice/crio-9265e48a9cf934b15bc221668c8f4861076e9c53fcd1f2776a2f1cab51b0edb6 WatchSource:0}: Error finding container 9265e48a9cf934b15bc221668c8f4861076e9c53fcd1f2776a2f1cab51b0edb6: Status 404 returned error can't find the container with id 9265e48a9cf934b15bc221668c8f4861076e9c53fcd1f2776a2f1cab51b0edb6
Jan 28 16:57:58 crc kubenswrapper[4877]: I0128 16:57:58.046425 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-db-secret"
Jan 28 16:57:58 crc kubenswrapper[4877]: I0128 16:57:58.314304 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-7395-account-create-update-2gfnc" event={"ID":"b0f6fbde-4240-4911-9b0d-50740836e659","Type":"ContainerStarted","Data":"9265e48a9cf934b15bc221668c8f4861076e9c53fcd1f2776a2f1cab51b0edb6"}
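The W manager.go:1169 "Status 404 ... can't find the container" entry above is cAdvisor's cgroup watcher racing container creation: the inotify event for the new crio-9265e48a… scope arrives before the runtime has registered the container, the lookup 404s, and the very next PLEG event shows the same container ID starting normally. Pairing the two mechanically confirms the warning was transient (a sketch; the regexes follow this log's wording):

```go
// Sketch: match cAdvisor "Status 404" watch warnings with the later
// ContainerStarted PLEG event for the same 64-hex container ID.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

var (
	notFound = regexp.MustCompile(`can't find the container with id ([0-9a-f]{64})`)
	started  = regexp.MustCompile(`"Type":"ContainerStarted","Data":"([0-9a-f]{64})"`)
)

func main() {
	pending := map[string]bool{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<20)
	for sc.Scan() {
		line := sc.Text()
		if m := notFound.FindStringSubmatch(line); m != nil {
			pending[m[1]] = true
		}
		if m := started.FindStringSubmatch(line); m != nil && pending[m[1]] {
			fmt.Println("transient 404, container started:", m[1][:12])
			delete(pending, m[1])
		}
	}
}
```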
event={"ID":"39b89193-b00c-41ea-bf92-cc3aebce6a31","Type":"ContainerStarted","Data":"bcbd7d9e8a37a6a7cdc79876a75030bf510a96e6db2a061cd438e46d304daea0"} Jan 28 16:57:59 crc kubenswrapper[4877]: I0128 16:57:59.329886 4877 generic.go:334] "Generic (PLEG): container finished" podID="39b89193-b00c-41ea-bf92-cc3aebce6a31" containerID="ab66e67e6a1d2d4a3369839f697f1625ae2f81705ddded572cba9f6dbdb3b757" exitCode=0 Jan 28 16:57:59 crc kubenswrapper[4877]: I0128 16:57:59.335047 4877 generic.go:334] "Generic (PLEG): container finished" podID="b0f6fbde-4240-4911-9b0d-50740836e659" containerID="836af63dae82f19a79347cb78d9d64884f4f01e0da3e2c54ccc7a2a4ed4bd753" exitCode=0 Jan 28 16:57:59 crc kubenswrapper[4877]: I0128 16:57:59.342147 4877 generic.go:334] "Generic (PLEG): container finished" podID="f8872319-aeb2-4f15-bfc5-3e9abd62770e" containerID="8959a1835ff54e5295927b5f85d9061f6a9a20238e82275c21b10eab840d361c" exitCode=0 Jan 28 16:57:59 crc kubenswrapper[4877]: I0128 16:57:59.346033 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-6m8l9" event={"ID":"39b89193-b00c-41ea-bf92-cc3aebce6a31","Type":"ContainerDied","Data":"ab66e67e6a1d2d4a3369839f697f1625ae2f81705ddded572cba9f6dbdb3b757"} Jan 28 16:57:59 crc kubenswrapper[4877]: I0128 16:57:59.346086 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-7395-account-create-update-2gfnc" event={"ID":"b0f6fbde-4240-4911-9b0d-50740836e659","Type":"ContainerDied","Data":"836af63dae82f19a79347cb78d9d64884f4f01e0da3e2c54ccc7a2a4ed4bd753"} Jan 28 16:57:59 crc kubenswrapper[4877]: I0128 16:57:59.346103 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-sp2zz" event={"ID":"f8872319-aeb2-4f15-bfc5-3e9abd62770e","Type":"ContainerDied","Data":"8959a1835ff54e5295927b5f85d9061f6a9a20238e82275c21b10eab840d361c"} Jan 28 16:58:00 crc kubenswrapper[4877]: I0128 16:58:00.032178 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:58:00 crc kubenswrapper[4877]: I0128 16:58:00.046570 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/03f31376-451f-4360-bea2-4d4e557568f0-etc-swift\") pod \"swift-storage-0\" (UID: \"03f31376-451f-4360-bea2-4d4e557568f0\") " pod="openstack/swift-storage-0" Jan 28 16:58:00 crc kubenswrapper[4877]: I0128 16:58:00.076275 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 28 16:58:00 crc kubenswrapper[4877]: I0128 16:58:00.855550 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-1" podUID="2f642a61-430e-4dfc-b6b6-3ee68161eaf6" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused" Jan 28 16:58:00 crc kubenswrapper[4877]: I0128 16:58:00.983255 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-85w55"] Jan 28 16:58:00 crc kubenswrapper[4877]: I0128 16:58:00.984625 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-85w55" Jan 28 16:58:00 crc kubenswrapper[4877]: I0128 16:58:00.986537 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 28 16:58:01 crc kubenswrapper[4877]: I0128 16:58:01.009079 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-85w55"] Jan 28 16:58:01 crc kubenswrapper[4877]: I0128 16:58:01.059248 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ff226f3-96f3-413f-8f05-57e149367490-operator-scripts\") pod \"root-account-create-update-85w55\" (UID: \"7ff226f3-96f3-413f-8f05-57e149367490\") " pod="openstack/root-account-create-update-85w55" Jan 28 16:58:01 crc kubenswrapper[4877]: I0128 16:58:01.059378 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jvhc\" (UniqueName: \"kubernetes.io/projected/7ff226f3-96f3-413f-8f05-57e149367490-kube-api-access-5jvhc\") pod \"root-account-create-update-85w55\" (UID: \"7ff226f3-96f3-413f-8f05-57e149367490\") " pod="openstack/root-account-create-update-85w55" Jan 28 16:58:01 crc kubenswrapper[4877]: I0128 16:58:01.161244 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ff226f3-96f3-413f-8f05-57e149367490-operator-scripts\") pod \"root-account-create-update-85w55\" (UID: \"7ff226f3-96f3-413f-8f05-57e149367490\") " pod="openstack/root-account-create-update-85w55" Jan 28 16:58:01 crc kubenswrapper[4877]: I0128 16:58:01.161335 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jvhc\" (UniqueName: \"kubernetes.io/projected/7ff226f3-96f3-413f-8f05-57e149367490-kube-api-access-5jvhc\") pod \"root-account-create-update-85w55\" (UID: \"7ff226f3-96f3-413f-8f05-57e149367490\") " pod="openstack/root-account-create-update-85w55" Jan 28 16:58:01 crc kubenswrapper[4877]: I0128 16:58:01.162635 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ff226f3-96f3-413f-8f05-57e149367490-operator-scripts\") pod \"root-account-create-update-85w55\" (UID: \"7ff226f3-96f3-413f-8f05-57e149367490\") " pod="openstack/root-account-create-update-85w55" Jan 28 16:58:01 crc kubenswrapper[4877]: I0128 16:58:01.188338 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jvhc\" (UniqueName: \"kubernetes.io/projected/7ff226f3-96f3-413f-8f05-57e149367490-kube-api-access-5jvhc\") pod \"root-account-create-update-85w55\" (UID: \"7ff226f3-96f3-413f-8f05-57e149367490\") " pod="openstack/root-account-create-update-85w55" Jan 28 16:58:01 crc kubenswrapper[4877]: I0128 16:58:01.316216 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-85w55" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.538148 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-6m8l9" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.603350 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39b89193-b00c-41ea-bf92-cc3aebce6a31-operator-scripts\") pod \"39b89193-b00c-41ea-bf92-cc3aebce6a31\" (UID: \"39b89193-b00c-41ea-bf92-cc3aebce6a31\") " Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.603408 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snqgz\" (UniqueName: \"kubernetes.io/projected/39b89193-b00c-41ea-bf92-cc3aebce6a31-kube-api-access-snqgz\") pod \"39b89193-b00c-41ea-bf92-cc3aebce6a31\" (UID: \"39b89193-b00c-41ea-bf92-cc3aebce6a31\") " Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.608699 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39b89193-b00c-41ea-bf92-cc3aebce6a31-kube-api-access-snqgz" (OuterVolumeSpecName: "kube-api-access-snqgz") pod "39b89193-b00c-41ea-bf92-cc3aebce6a31" (UID: "39b89193-b00c-41ea-bf92-cc3aebce6a31"). InnerVolumeSpecName "kube-api-access-snqgz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.610597 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39b89193-b00c-41ea-bf92-cc3aebce6a31-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "39b89193-b00c-41ea-bf92-cc3aebce6a31" (UID: "39b89193-b00c-41ea-bf92-cc3aebce6a31"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.706055 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/39b89193-b00c-41ea-bf92-cc3aebce6a31-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.706098 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snqgz\" (UniqueName: \"kubernetes.io/projected/39b89193-b00c-41ea-bf92-cc3aebce6a31-kube-api-access-snqgz\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.741621 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-7395-account-create-update-2gfnc" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.748766 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.808221 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f8872319-aeb2-4f15-bfc5-3e9abd62770e-etc-swift\") pod \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.808371 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f8872319-aeb2-4f15-bfc5-3e9abd62770e-scripts\") pod \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.808433 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-swiftconf\") pod \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.808557 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0f6fbde-4240-4911-9b0d-50740836e659-operator-scripts\") pod \"b0f6fbde-4240-4911-9b0d-50740836e659\" (UID: \"b0f6fbde-4240-4911-9b0d-50740836e659\") " Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.808589 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-dispersionconf\") pod \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.808625 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4h4sv\" (UniqueName: \"kubernetes.io/projected/f8872319-aeb2-4f15-bfc5-3e9abd62770e-kube-api-access-4h4sv\") pod \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.808652 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-combined-ca-bundle\") pod \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.808674 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f8872319-aeb2-4f15-bfc5-3e9abd62770e-ring-data-devices\") pod \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.808742 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbbdg\" (UniqueName: \"kubernetes.io/projected/b0f6fbde-4240-4911-9b0d-50740836e659-kube-api-access-tbbdg\") pod \"b0f6fbde-4240-4911-9b0d-50740836e659\" (UID: \"b0f6fbde-4240-4911-9b0d-50740836e659\") " Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.810068 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0f6fbde-4240-4911-9b0d-50740836e659-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod 
"b0f6fbde-4240-4911-9b0d-50740836e659" (UID: "b0f6fbde-4240-4911-9b0d-50740836e659"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.814939 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8872319-aeb2-4f15-bfc5-3e9abd62770e-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "f8872319-aeb2-4f15-bfc5-3e9abd62770e" (UID: "f8872319-aeb2-4f15-bfc5-3e9abd62770e"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.817069 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8872319-aeb2-4f15-bfc5-3e9abd62770e-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "f8872319-aeb2-4f15-bfc5-3e9abd62770e" (UID: "f8872319-aeb2-4f15-bfc5-3e9abd62770e"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.819171 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8872319-aeb2-4f15-bfc5-3e9abd62770e-kube-api-access-4h4sv" (OuterVolumeSpecName: "kube-api-access-4h4sv") pod "f8872319-aeb2-4f15-bfc5-3e9abd62770e" (UID: "f8872319-aeb2-4f15-bfc5-3e9abd62770e"). InnerVolumeSpecName "kube-api-access-4h4sv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.819842 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "f8872319-aeb2-4f15-bfc5-3e9abd62770e" (UID: "f8872319-aeb2-4f15-bfc5-3e9abd62770e"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.825357 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0f6fbde-4240-4911-9b0d-50740836e659-kube-api-access-tbbdg" (OuterVolumeSpecName: "kube-api-access-tbbdg") pod "b0f6fbde-4240-4911-9b0d-50740836e659" (UID: "b0f6fbde-4240-4911-9b0d-50740836e659"). InnerVolumeSpecName "kube-api-access-tbbdg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:58:02 crc kubenswrapper[4877]: E0128 16:58:02.858874 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f8872319-aeb2-4f15-bfc5-3e9abd62770e-scripts podName:f8872319-aeb2-4f15-bfc5-3e9abd62770e nodeName:}" failed. No retries permitted until 2026-01-28 16:58:03.358841666 +0000 UTC m=+1386.917168564 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "scripts" (UniqueName: "kubernetes.io/configmap/f8872319-aeb2-4f15-bfc5-3e9abd62770e-scripts") pod "f8872319-aeb2-4f15-bfc5-3e9abd62770e" (UID: "f8872319-aeb2-4f15-bfc5-3e9abd62770e") : error deleting /var/lib/kubelet/pods/f8872319-aeb2-4f15-bfc5-3e9abd62770e/volume-subpaths: remove /var/lib/kubelet/pods/f8872319-aeb2-4f15-bfc5-3e9abd62770e/volume-subpaths: no such file or directory Jan 28 16:58:02 crc kubenswrapper[4877]: E0128 16:58:02.860622 4877 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-combined-ca-bundle podName:f8872319-aeb2-4f15-bfc5-3e9abd62770e nodeName:}" failed. 
No retries permitted until 2026-01-28 16:58:03.360604984 +0000 UTC m=+1386.918931872 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-combined-ca-bundle") pod "f8872319-aeb2-4f15-bfc5-3e9abd62770e" (UID: "f8872319-aeb2-4f15-bfc5-3e9abd62770e") : error deleting /var/lib/kubelet/pods/f8872319-aeb2-4f15-bfc5-3e9abd62770e/volume-subpaths: remove /var/lib/kubelet/pods/f8872319-aeb2-4f15-bfc5-3e9abd62770e/volume-subpaths: no such file or directory Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.868183 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "f8872319-aeb2-4f15-bfc5-3e9abd62770e" (UID: "f8872319-aeb2-4f15-bfc5-3e9abd62770e"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.913213 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0f6fbde-4240-4911-9b0d-50740836e659-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.913262 4877 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.913278 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4h4sv\" (UniqueName: \"kubernetes.io/projected/f8872319-aeb2-4f15-bfc5-3e9abd62770e-kube-api-access-4h4sv\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.913290 4877 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f8872319-aeb2-4f15-bfc5-3e9abd62770e-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.913302 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tbbdg\" (UniqueName: \"kubernetes.io/projected/b0f6fbde-4240-4911-9b0d-50740836e659-kube-api-access-tbbdg\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.913313 4877 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f8872319-aeb2-4f15-bfc5-3e9abd62770e-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:02 crc kubenswrapper[4877]: I0128 16:58:02.913326 4877 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.416601 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-7395-account-create-update-2gfnc" event={"ID":"b0f6fbde-4240-4911-9b0d-50740836e659","Type":"ContainerDied","Data":"9265e48a9cf934b15bc221668c8f4861076e9c53fcd1f2776a2f1cab51b0edb6"} Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.417732 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9265e48a9cf934b15bc221668c8f4861076e9c53fcd1f2776a2f1cab51b0edb6" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.418069 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-7395-account-create-update-2gfnc" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.426032 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-sp2zz" event={"ID":"f8872319-aeb2-4f15-bfc5-3e9abd62770e","Type":"ContainerDied","Data":"713ff8386c5b9964c01045112e062041782b8750e74aad36568a14d8f4fcc302"} Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.426632 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="713ff8386c5b9964c01045112e062041782b8750e74aad36568a14d8f4fcc302" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.426042 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-sp2zz" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.433163 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f8872319-aeb2-4f15-bfc5-3e9abd62770e-scripts\") pod \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.433344 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-combined-ca-bundle\") pod \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\" (UID: \"f8872319-aeb2-4f15-bfc5-3e9abd62770e\") " Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.434048 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8872319-aeb2-4f15-bfc5-3e9abd62770e-scripts" (OuterVolumeSpecName: "scripts") pod "f8872319-aeb2-4f15-bfc5-3e9abd62770e" (UID: "f8872319-aeb2-4f15-bfc5-3e9abd62770e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.434853 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-6m8l9" event={"ID":"39b89193-b00c-41ea-bf92-cc3aebce6a31","Type":"ContainerDied","Data":"bcbd7d9e8a37a6a7cdc79876a75030bf510a96e6db2a061cd438e46d304daea0"} Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.434893 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bcbd7d9e8a37a6a7cdc79876a75030bf510a96e6db2a061cd438e46d304daea0" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.434943 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-6m8l9" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.448570 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f8872319-aeb2-4f15-bfc5-3e9abd62770e" (UID: "f8872319-aeb2-4f15-bfc5-3e9abd62770e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.450101 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f8872319-aeb2-4f15-bfc5-3e9abd62770e-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.450217 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8872319-aeb2-4f15-bfc5-3e9abd62770e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.579675 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-95w8q"] Jan 28 16:58:03 crc kubenswrapper[4877]: E0128 16:58:03.580286 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8872319-aeb2-4f15-bfc5-3e9abd62770e" containerName="swift-ring-rebalance" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.580303 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8872319-aeb2-4f15-bfc5-3e9abd62770e" containerName="swift-ring-rebalance" Jan 28 16:58:03 crc kubenswrapper[4877]: E0128 16:58:03.580356 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39b89193-b00c-41ea-bf92-cc3aebce6a31" containerName="mariadb-database-create" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.580365 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="39b89193-b00c-41ea-bf92-cc3aebce6a31" containerName="mariadb-database-create" Jan 28 16:58:03 crc kubenswrapper[4877]: E0128 16:58:03.580379 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0f6fbde-4240-4911-9b0d-50740836e659" containerName="mariadb-account-create-update" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.580388 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0f6fbde-4240-4911-9b0d-50740836e659" containerName="mariadb-account-create-update" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.580656 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0f6fbde-4240-4911-9b0d-50740836e659" containerName="mariadb-account-create-update" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.580680 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="39b89193-b00c-41ea-bf92-cc3aebce6a31" containerName="mariadb-database-create" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.580702 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8872319-aeb2-4f15-bfc5-3e9abd62770e" containerName="swift-ring-rebalance" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.581719 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-95w8q" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.588099 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-95w8q"] Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.656170 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7z6t\" (UniqueName: \"kubernetes.io/projected/ed43d723-2ac6-40d0-aaea-148be1ceb3a4-kube-api-access-r7z6t\") pod \"keystone-db-create-95w8q\" (UID: \"ed43d723-2ac6-40d0-aaea-148be1ceb3a4\") " pod="openstack/keystone-db-create-95w8q" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.656263 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed43d723-2ac6-40d0-aaea-148be1ceb3a4-operator-scripts\") pod \"keystone-db-create-95w8q\" (UID: \"ed43d723-2ac6-40d0-aaea-148be1ceb3a4\") " pod="openstack/keystone-db-create-95w8q" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.737849 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-adb0-account-create-update-rgtpf"] Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.739659 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-adb0-account-create-update-rgtpf" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.744207 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.749231 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-adb0-account-create-update-rgtpf"] Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.760078 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7z6t\" (UniqueName: \"kubernetes.io/projected/ed43d723-2ac6-40d0-aaea-148be1ceb3a4-kube-api-access-r7z6t\") pod \"keystone-db-create-95w8q\" (UID: \"ed43d723-2ac6-40d0-aaea-148be1ceb3a4\") " pod="openstack/keystone-db-create-95w8q" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.760283 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed43d723-2ac6-40d0-aaea-148be1ceb3a4-operator-scripts\") pod \"keystone-db-create-95w8q\" (UID: \"ed43d723-2ac6-40d0-aaea-148be1ceb3a4\") " pod="openstack/keystone-db-create-95w8q" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.858250 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed43d723-2ac6-40d0-aaea-148be1ceb3a4-operator-scripts\") pod \"keystone-db-create-95w8q\" (UID: \"ed43d723-2ac6-40d0-aaea-148be1ceb3a4\") " pod="openstack/keystone-db-create-95w8q" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.862175 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nzzp\" (UniqueName: \"kubernetes.io/projected/9895a837-fc16-4072-9fb8-9b79cb56b53b-kube-api-access-5nzzp\") pod \"keystone-adb0-account-create-update-rgtpf\" (UID: \"9895a837-fc16-4072-9fb8-9b79cb56b53b\") " pod="openstack/keystone-adb0-account-create-update-rgtpf" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.862307 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/9895a837-fc16-4072-9fb8-9b79cb56b53b-operator-scripts\") pod \"keystone-adb0-account-create-update-rgtpf\" (UID: \"9895a837-fc16-4072-9fb8-9b79cb56b53b\") " pod="openstack/keystone-adb0-account-create-update-rgtpf" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.872797 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7z6t\" (UniqueName: \"kubernetes.io/projected/ed43d723-2ac6-40d0-aaea-148be1ceb3a4-kube-api-access-r7z6t\") pod \"keystone-db-create-95w8q\" (UID: \"ed43d723-2ac6-40d0-aaea-148be1ceb3a4\") " pod="openstack/keystone-db-create-95w8q" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.897019 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-p94jx"] Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.898644 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-p94jx" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.919386 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-p94jx"] Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.964050 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9895a837-fc16-4072-9fb8-9b79cb56b53b-operator-scripts\") pod \"keystone-adb0-account-create-update-rgtpf\" (UID: \"9895a837-fc16-4072-9fb8-9b79cb56b53b\") " pod="openstack/keystone-adb0-account-create-update-rgtpf" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.964113 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4qs6\" (UniqueName: \"kubernetes.io/projected/b7997d91-de54-4f22-aa26-34db8d4d0c48-kube-api-access-q4qs6\") pod \"placement-db-create-p94jx\" (UID: \"b7997d91-de54-4f22-aa26-34db8d4d0c48\") " pod="openstack/placement-db-create-p94jx" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.964301 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nzzp\" (UniqueName: \"kubernetes.io/projected/9895a837-fc16-4072-9fb8-9b79cb56b53b-kube-api-access-5nzzp\") pod \"keystone-adb0-account-create-update-rgtpf\" (UID: \"9895a837-fc16-4072-9fb8-9b79cb56b53b\") " pod="openstack/keystone-adb0-account-create-update-rgtpf" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.964360 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7997d91-de54-4f22-aa26-34db8d4d0c48-operator-scripts\") pod \"placement-db-create-p94jx\" (UID: \"b7997d91-de54-4f22-aa26-34db8d4d0c48\") " pod="openstack/placement-db-create-p94jx" Jan 28 16:58:03 crc kubenswrapper[4877]: I0128 16:58:03.988270 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nzzp\" (UniqueName: \"kubernetes.io/projected/9895a837-fc16-4072-9fb8-9b79cb56b53b-kube-api-access-5nzzp\") pod \"keystone-adb0-account-create-update-rgtpf\" (UID: \"9895a837-fc16-4072-9fb8-9b79cb56b53b\") " pod="openstack/keystone-adb0-account-create-update-rgtpf" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.000395 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-20ab-account-create-update-j946m"] Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.002381 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-20ab-account-create-update-j946m" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.005506 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.021612 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-20ab-account-create-update-j946m"] Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.039906 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-85w55"] Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.066921 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8cs7\" (UniqueName: \"kubernetes.io/projected/b7450209-21cd-4ad0-b700-080ff83306e1-kube-api-access-h8cs7\") pod \"placement-20ab-account-create-update-j946m\" (UID: \"b7450209-21cd-4ad0-b700-080ff83306e1\") " pod="openstack/placement-20ab-account-create-update-j946m" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.066985 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4qs6\" (UniqueName: \"kubernetes.io/projected/b7997d91-de54-4f22-aa26-34db8d4d0c48-kube-api-access-q4qs6\") pod \"placement-db-create-p94jx\" (UID: \"b7997d91-de54-4f22-aa26-34db8d4d0c48\") " pod="openstack/placement-db-create-p94jx" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.067047 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7450209-21cd-4ad0-b700-080ff83306e1-operator-scripts\") pod \"placement-20ab-account-create-update-j946m\" (UID: \"b7450209-21cd-4ad0-b700-080ff83306e1\") " pod="openstack/placement-20ab-account-create-update-j946m" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.067148 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7997d91-de54-4f22-aa26-34db8d4d0c48-operator-scripts\") pod \"placement-db-create-p94jx\" (UID: \"b7997d91-de54-4f22-aa26-34db8d4d0c48\") " pod="openstack/placement-db-create-p94jx" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.067988 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7997d91-de54-4f22-aa26-34db8d4d0c48-operator-scripts\") pod \"placement-db-create-p94jx\" (UID: \"b7997d91-de54-4f22-aa26-34db8d4d0c48\") " pod="openstack/placement-db-create-p94jx" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.088361 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4qs6\" (UniqueName: \"kubernetes.io/projected/b7997d91-de54-4f22-aa26-34db8d4d0c48-kube-api-access-q4qs6\") pod \"placement-db-create-p94jx\" (UID: \"b7997d91-de54-4f22-aa26-34db8d4d0c48\") " pod="openstack/placement-db-create-p94jx" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.154368 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-95w8q" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.169006 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8cs7\" (UniqueName: \"kubernetes.io/projected/b7450209-21cd-4ad0-b700-080ff83306e1-kube-api-access-h8cs7\") pod \"placement-20ab-account-create-update-j946m\" (UID: \"b7450209-21cd-4ad0-b700-080ff83306e1\") " pod="openstack/placement-20ab-account-create-update-j946m" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.169115 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7450209-21cd-4ad0-b700-080ff83306e1-operator-scripts\") pod \"placement-20ab-account-create-update-j946m\" (UID: \"b7450209-21cd-4ad0-b700-080ff83306e1\") " pod="openstack/placement-20ab-account-create-update-j946m" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.170377 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7450209-21cd-4ad0-b700-080ff83306e1-operator-scripts\") pod \"placement-20ab-account-create-update-j946m\" (UID: \"b7450209-21cd-4ad0-b700-080ff83306e1\") " pod="openstack/placement-20ab-account-create-update-j946m" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.188244 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8cs7\" (UniqueName: \"kubernetes.io/projected/b7450209-21cd-4ad0-b700-080ff83306e1-kube-api-access-h8cs7\") pod \"placement-20ab-account-create-update-j946m\" (UID: \"b7450209-21cd-4ad0-b700-080ff83306e1\") " pod="openstack/placement-20ab-account-create-update-j946m" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.250612 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-p94jx" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.292369 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-dhv7z"] Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.294274 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-dhv7z" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.305960 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-dhv7z"] Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.439461 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-20ab-account-create-update-j946m" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.456518 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-85w55" event={"ID":"7ff226f3-96f3-413f-8f05-57e149367490","Type":"ContainerStarted","Data":"e89f3f36948435caebba7c8aba73734cfe3502f569aa6b56038717937b9e3869"} Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.457177 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h62dh\" (UniqueName: \"kubernetes.io/projected/172466e0-ca09-497b-8356-3099ad380f3a-kube-api-access-h62dh\") pod \"glance-db-create-dhv7z\" (UID: \"172466e0-ca09-497b-8356-3099ad380f3a\") " pod="openstack/glance-db-create-dhv7z" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.458108 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/172466e0-ca09-497b-8356-3099ad380f3a-operator-scripts\") pod \"glance-db-create-dhv7z\" (UID: \"172466e0-ca09-497b-8356-3099ad380f3a\") " pod="openstack/glance-db-create-dhv7z" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.520916 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-ab30-account-create-update-k22df"] Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.532036 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-ab30-account-create-update-k22df" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.553177 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-ab30-account-create-update-k22df"] Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.560150 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/172466e0-ca09-497b-8356-3099ad380f3a-operator-scripts\") pod \"glance-db-create-dhv7z\" (UID: \"172466e0-ca09-497b-8356-3099ad380f3a\") " pod="openstack/glance-db-create-dhv7z" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.560223 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h62dh\" (UniqueName: \"kubernetes.io/projected/172466e0-ca09-497b-8356-3099ad380f3a-kube-api-access-h62dh\") pod \"glance-db-create-dhv7z\" (UID: \"172466e0-ca09-497b-8356-3099ad380f3a\") " pod="openstack/glance-db-create-dhv7z" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.561293 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/172466e0-ca09-497b-8356-3099ad380f3a-operator-scripts\") pod \"glance-db-create-dhv7z\" (UID: \"172466e0-ca09-497b-8356-3099ad380f3a\") " pod="openstack/glance-db-create-dhv7z" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.569492 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.615330 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h62dh\" (UniqueName: \"kubernetes.io/projected/172466e0-ca09-497b-8356-3099ad380f3a-kube-api-access-h62dh\") pod \"glance-db-create-dhv7z\" (UID: \"172466e0-ca09-497b-8356-3099ad380f3a\") " pod="openstack/glance-db-create-dhv7z" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.642436 4877 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/glance-db-create-dhv7z" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.667362 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnlpx\" (UniqueName: \"kubernetes.io/projected/49096c30-9fbe-45ef-8cb5-1808efde086b-kube-api-access-fnlpx\") pod \"glance-ab30-account-create-update-k22df\" (UID: \"49096c30-9fbe-45ef-8cb5-1808efde086b\") " pod="openstack/glance-ab30-account-create-update-k22df" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.667661 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49096c30-9fbe-45ef-8cb5-1808efde086b-operator-scripts\") pod \"glance-ab30-account-create-update-k22df\" (UID: \"49096c30-9fbe-45ef-8cb5-1808efde086b\") " pod="openstack/glance-ab30-account-create-update-k22df" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.793394 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnlpx\" (UniqueName: \"kubernetes.io/projected/49096c30-9fbe-45ef-8cb5-1808efde086b-kube-api-access-fnlpx\") pod \"glance-ab30-account-create-update-k22df\" (UID: \"49096c30-9fbe-45ef-8cb5-1808efde086b\") " pod="openstack/glance-ab30-account-create-update-k22df" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.794435 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49096c30-9fbe-45ef-8cb5-1808efde086b-operator-scripts\") pod \"glance-ab30-account-create-update-k22df\" (UID: \"49096c30-9fbe-45ef-8cb5-1808efde086b\") " pod="openstack/glance-ab30-account-create-update-k22df" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.796065 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49096c30-9fbe-45ef-8cb5-1808efde086b-operator-scripts\") pod \"glance-ab30-account-create-update-k22df\" (UID: \"49096c30-9fbe-45ef-8cb5-1808efde086b\") " pod="openstack/glance-ab30-account-create-update-k22df" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.837061 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-95w8q"] Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.838328 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnlpx\" (UniqueName: \"kubernetes.io/projected/49096c30-9fbe-45ef-8cb5-1808efde086b-kube-api-access-fnlpx\") pod \"glance-ab30-account-create-update-k22df\" (UID: \"49096c30-9fbe-45ef-8cb5-1808efde086b\") " pod="openstack/glance-ab30-account-create-update-k22df" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.862736 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 28 16:58:04 crc kubenswrapper[4877]: I0128 16:58:04.981788 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-ab30-account-create-update-k22df" Jan 28 16:58:05 crc kubenswrapper[4877]: I0128 16:58:05.007699 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-p94jx"] Jan 28 16:58:05 crc kubenswrapper[4877]: I0128 16:58:05.294335 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-20ab-account-create-update-j946m"] Jan 28 16:58:05 crc kubenswrapper[4877]: I0128 16:58:05.422939 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-dhv7z"] Jan 28 16:58:05 crc kubenswrapper[4877]: I0128 16:58:05.469342 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-95w8q" event={"ID":"ed43d723-2ac6-40d0-aaea-148be1ceb3a4","Type":"ContainerStarted","Data":"af9677cc58daaa44f69f48ca3e340f1128dd9ada9e8e500c41be3775dcfd6a59"} Jan 28 16:58:06 crc kubenswrapper[4877]: I0128 16:58:06.596015 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-hjfls"] Jan 28 16:58:06 crc kubenswrapper[4877]: I0128 16:58:06.597746 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-hjfls" Jan 28 16:58:06 crc kubenswrapper[4877]: I0128 16:58:06.605586 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-hjfls"] Jan 28 16:58:06 crc kubenswrapper[4877]: I0128 16:58:06.742423 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2117f641-020b-4fbc-b813-e56dab47f1c6-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-hjfls\" (UID: \"2117f641-020b-4fbc-b813-e56dab47f1c6\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-hjfls" Jan 28 16:58:06 crc kubenswrapper[4877]: I0128 16:58:06.743138 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rx7l\" (UniqueName: \"kubernetes.io/projected/2117f641-020b-4fbc-b813-e56dab47f1c6-kube-api-access-4rx7l\") pod \"mysqld-exporter-openstack-cell1-db-create-hjfls\" (UID: \"2117f641-020b-4fbc-b813-e56dab47f1c6\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-hjfls" Jan 28 16:58:06 crc kubenswrapper[4877]: I0128 16:58:06.813997 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-eeda-account-create-update-pmwp8"] Jan 28 16:58:06 crc kubenswrapper[4877]: I0128 16:58:06.815583 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-eeda-account-create-update-pmwp8" Jan 28 16:58:06 crc kubenswrapper[4877]: I0128 16:58:06.818938 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-cell1-db-secret" Jan 28 16:58:06 crc kubenswrapper[4877]: I0128 16:58:06.824935 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-eeda-account-create-update-pmwp8"] Jan 28 16:58:06 crc kubenswrapper[4877]: I0128 16:58:06.875019 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rx7l\" (UniqueName: \"kubernetes.io/projected/2117f641-020b-4fbc-b813-e56dab47f1c6-kube-api-access-4rx7l\") pod \"mysqld-exporter-openstack-cell1-db-create-hjfls\" (UID: \"2117f641-020b-4fbc-b813-e56dab47f1c6\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-hjfls" Jan 28 16:58:06 crc kubenswrapper[4877]: I0128 16:58:06.875118 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2117f641-020b-4fbc-b813-e56dab47f1c6-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-hjfls\" (UID: \"2117f641-020b-4fbc-b813-e56dab47f1c6\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-hjfls" Jan 28 16:58:06 crc kubenswrapper[4877]: I0128 16:58:06.875990 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2117f641-020b-4fbc-b813-e56dab47f1c6-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-hjfls\" (UID: \"2117f641-020b-4fbc-b813-e56dab47f1c6\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-hjfls" Jan 28 16:58:06 crc kubenswrapper[4877]: I0128 16:58:06.921842 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rx7l\" (UniqueName: \"kubernetes.io/projected/2117f641-020b-4fbc-b813-e56dab47f1c6-kube-api-access-4rx7l\") pod \"mysqld-exporter-openstack-cell1-db-create-hjfls\" (UID: \"2117f641-020b-4fbc-b813-e56dab47f1c6\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-hjfls" Jan 28 16:58:06 crc kubenswrapper[4877]: I0128 16:58:06.976748 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pl8qj\" (UniqueName: \"kubernetes.io/projected/cd70e089-7495-454b-8f44-eddda03fd848-kube-api-access-pl8qj\") pod \"mysqld-exporter-eeda-account-create-update-pmwp8\" (UID: \"cd70e089-7495-454b-8f44-eddda03fd848\") " pod="openstack/mysqld-exporter-eeda-account-create-update-pmwp8" Jan 28 16:58:06 crc kubenswrapper[4877]: I0128 16:58:06.976820 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd70e089-7495-454b-8f44-eddda03fd848-operator-scripts\") pod \"mysqld-exporter-eeda-account-create-update-pmwp8\" (UID: \"cd70e089-7495-454b-8f44-eddda03fd848\") " pod="openstack/mysqld-exporter-eeda-account-create-update-pmwp8" Jan 28 16:58:07 crc kubenswrapper[4877]: I0128 16:58:07.078835 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pl8qj\" (UniqueName: \"kubernetes.io/projected/cd70e089-7495-454b-8f44-eddda03fd848-kube-api-access-pl8qj\") pod \"mysqld-exporter-eeda-account-create-update-pmwp8\" (UID: \"cd70e089-7495-454b-8f44-eddda03fd848\") " pod="openstack/mysqld-exporter-eeda-account-create-update-pmwp8" Jan 28 
16:58:07 crc kubenswrapper[4877]: I0128 16:58:07.078901 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd70e089-7495-454b-8f44-eddda03fd848-operator-scripts\") pod \"mysqld-exporter-eeda-account-create-update-pmwp8\" (UID: \"cd70e089-7495-454b-8f44-eddda03fd848\") " pod="openstack/mysqld-exporter-eeda-account-create-update-pmwp8"
Jan 28 16:58:07 crc kubenswrapper[4877]: I0128 16:58:07.079816 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd70e089-7495-454b-8f44-eddda03fd848-operator-scripts\") pod \"mysqld-exporter-eeda-account-create-update-pmwp8\" (UID: \"cd70e089-7495-454b-8f44-eddda03fd848\") " pod="openstack/mysqld-exporter-eeda-account-create-update-pmwp8"
Jan 28 16:58:07 crc kubenswrapper[4877]: I0128 16:58:07.100703 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pl8qj\" (UniqueName: \"kubernetes.io/projected/cd70e089-7495-454b-8f44-eddda03fd848-kube-api-access-pl8qj\") pod \"mysqld-exporter-eeda-account-create-update-pmwp8\" (UID: \"cd70e089-7495-454b-8f44-eddda03fd848\") " pod="openstack/mysqld-exporter-eeda-account-create-update-pmwp8"
Jan 28 16:58:07 crc kubenswrapper[4877]: I0128 16:58:07.184538 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-eeda-account-create-update-pmwp8"
Jan 28 16:58:07 crc kubenswrapper[4877]: I0128 16:58:07.217574 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-hjfls"
Jan 28 16:58:10 crc kubenswrapper[4877]: I0128 16:58:10.835746 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="d96b5016-3ed4-4f98-8708-f69092894981" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused"
Jan 28 16:58:10 crc kubenswrapper[4877]: I0128 16:58:10.851111 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-1" podUID="2f642a61-430e-4dfc-b6b6-3ee68161eaf6" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused"
Jan 28 16:58:11 crc kubenswrapper[4877]: I0128 16:58:11.088775 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="951f6a86-2dbc-402b-bb10-9a16d347c697" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.128:5671: connect: connection refused"
Jan 28 16:58:11 crc kubenswrapper[4877]: I0128 16:58:11.276465 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="5d261b3a-c6f9-48bd-92de-b76d3821e778" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.131:5671: connect: connection refused"
Jan 28 16:58:13 crc kubenswrapper[4877]: I0128 16:58:13.273909 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9895a837-fc16-4072-9fb8-9b79cb56b53b-operator-scripts\") pod \"keystone-adb0-account-create-update-rgtpf\" (UID: \"9895a837-fc16-4072-9fb8-9b79cb56b53b\") " pod="openstack/keystone-adb0-account-create-update-rgtpf"
Jan 28 16:58:13 crc kubenswrapper[4877]: W0128 16:58:13.304305 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb7997d91_de54_4f22_aa26_34db8d4d0c48.slice/crio-7dc8d33c5340f0f05492671f3fbe5a802e76a31028b88b782fbada303e05d6d1 WatchSource:0}: Error finding container 7dc8d33c5340f0f05492671f3fbe5a802e76a31028b88b782fbada303e05d6d1: Status 404 returned error can't find the container with id 7dc8d33c5340f0f05492671f3fbe5a802e76a31028b88b782fbada303e05d6d1
Jan 28 16:58:13 crc kubenswrapper[4877]: W0128 16:58:13.325534 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod172466e0_ca09_497b_8356_3099ad380f3a.slice/crio-c9e7ceede4a5d9fe08e5f78538771a931b9067a15db8d48eb4ce6f172b707f59 WatchSource:0}: Error finding container c9e7ceede4a5d9fe08e5f78538771a931b9067a15db8d48eb4ce6f172b707f59: Status 404 returned error can't find the container with id c9e7ceede4a5d9fe08e5f78538771a931b9067a15db8d48eb4ce6f172b707f59
Jan 28 16:58:13 crc kubenswrapper[4877]: I0128 16:58:13.420796 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"]
Jan 28 16:58:13 crc kubenswrapper[4877]: I0128 16:58:13.464211 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-adb0-account-create-update-rgtpf"
Jan 28 16:58:13 crc kubenswrapper[4877]: I0128 16:58:13.574524 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"03f31376-451f-4360-bea2-4d4e557568f0","Type":"ContainerStarted","Data":"1f2eb8c69f9f03c2ae5b88b0a51468817f1c186a1b959b5363db83d25b58dd3f"}
Jan 28 16:58:13 crc kubenswrapper[4877]: I0128 16:58:13.577495 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-dhv7z" event={"ID":"172466e0-ca09-497b-8356-3099ad380f3a","Type":"ContainerStarted","Data":"c9e7ceede4a5d9fe08e5f78538771a931b9067a15db8d48eb4ce6f172b707f59"}
Jan 28 16:58:13 crc kubenswrapper[4877]: I0128 16:58:13.592385 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-p94jx" event={"ID":"b7997d91-de54-4f22-aa26-34db8d4d0c48","Type":"ContainerStarted","Data":"7dc8d33c5340f0f05492671f3fbe5a802e76a31028b88b782fbada303e05d6d1"}
Jan 28 16:58:13 crc kubenswrapper[4877]: I0128 16:58:13.595984 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-85w55" event={"ID":"7ff226f3-96f3-413f-8f05-57e149367490","Type":"ContainerStarted","Data":"7efacec95ed943cd67ca1e09e8454f8725b4b6d7395604886f42e58a36c4a64c"}
Jan 28 16:58:13 crc kubenswrapper[4877]: I0128 16:58:13.598027 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-20ab-account-create-update-j946m" event={"ID":"b7450209-21cd-4ad0-b700-080ff83306e1","Type":"ContainerStarted","Data":"ede73bc28a4025127850be79f5ca8a5d60041c139d329714b2e2ce99b84ff19d"}
Jan 28 16:58:14 crc kubenswrapper[4877]: I0128 16:58:14.077963 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-eeda-account-create-update-pmwp8"]
Jan 28 16:58:14 crc kubenswrapper[4877]: I0128 16:58:14.086441 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-ab30-account-create-update-k22df"]
Jan 28 16:58:14 crc kubenswrapper[4877]: W0128 16:58:14.104333 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49096c30_9fbe_45ef_8cb5_1808efde086b.slice/crio-a859bd6bec9ebd9f1b93d4c981f8b18961ebf5e03eab312cc417f8ba2372be59 WatchSource:0}: Error finding container a859bd6bec9ebd9f1b93d4c981f8b18961ebf5e03eab312cc417f8ba2372be59: Status 404 returned error can't find the container with id a859bd6bec9ebd9f1b93d4c981f8b18961ebf5e03eab312cc417f8ba2372be59
Jan 28 16:58:14 crc kubenswrapper[4877]: I0128 16:58:14.234814 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-hjfls"]
Jan 28 16:58:14 crc kubenswrapper[4877]: I0128 16:58:14.254458 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-adb0-account-create-update-rgtpf"]
Jan 28 16:58:14 crc kubenswrapper[4877]: I0128 16:58:14.609983 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-hjfls" event={"ID":"2117f641-020b-4fbc-b813-e56dab47f1c6","Type":"ContainerStarted","Data":"82193784a1b53b3e6d8691cb7f5cc85d32506e948eda493a8402b62b8b6ffa1d"}
Jan 28 16:58:14 crc kubenswrapper[4877]: I0128 16:58:14.611998 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ab30-account-create-update-k22df" event={"ID":"49096c30-9fbe-45ef-8cb5-1808efde086b","Type":"ContainerStarted","Data":"a859bd6bec9ebd9f1b93d4c981f8b18961ebf5e03eab312cc417f8ba2372be59"}
Jan 28 16:58:14 crc kubenswrapper[4877]: I0128 16:58:14.614435 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-95w8q" event={"ID":"ed43d723-2ac6-40d0-aaea-148be1ceb3a4","Type":"ContainerStarted","Data":"ff675c788d25e2dc6a8ff30a48039ea321025b0eaf0bff5337f4a4e3dd5732eb"}
Jan 28 16:58:14 crc kubenswrapper[4877]: I0128 16:58:14.616288 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-20ab-account-create-update-j946m" event={"ID":"b7450209-21cd-4ad0-b700-080ff83306e1","Type":"ContainerStarted","Data":"ada214a4032c423787513923e28048ce45bcac98352a8f888a2adf6e2fe15c89"}
Jan 28 16:58:14 crc kubenswrapper[4877]: I0128 16:58:14.618310 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-adb0-account-create-update-rgtpf" event={"ID":"9895a837-fc16-4072-9fb8-9b79cb56b53b","Type":"ContainerStarted","Data":"f48eb40f7ee7f463f3e4e07fe6794d4a41379b4cbd4e0e1db3338cdeb77afa29"}
Jan 28 16:58:14 crc kubenswrapper[4877]: I0128 16:58:14.620796 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-eeda-account-create-update-pmwp8" event={"ID":"cd70e089-7495-454b-8f44-eddda03fd848","Type":"ContainerStarted","Data":"0d90615ce656e00b4888224488031642cafdfbde0262855ee388c23101eae1d0"}
Jan 28 16:58:14 crc kubenswrapper[4877]: I0128 16:58:14.622725 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-dhv7z" event={"ID":"172466e0-ca09-497b-8356-3099ad380f3a","Type":"ContainerStarted","Data":"5133cbf756c0fbe72c5dedf2ead3cc26813b5eae707482cff2fd43ff854f3127"}
Jan 28 16:58:14 crc kubenswrapper[4877]: I0128 16:58:14.625667 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-p94jx" event={"ID":"b7997d91-de54-4f22-aa26-34db8d4d0c48","Type":"ContainerStarted","Data":"b01a3faf5022f37e2d05c5373f278b936ce816b9887f8b193ab5db224f90a560"}
Jan 28 16:58:14 crc kubenswrapper[4877]: I0128 16:58:14.653444 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-85w55" podStartSLOduration=14.653422566 podStartE2EDuration="14.653422566s" podCreationTimestamp="2026-01-28 16:58:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:14.646239204 +0000 UTC m=+1398.204566092" watchObservedRunningTime="2026-01-28 16:58:14.653422566 +0000 UTC m=+1398.211749454"
Jan 28 16:58:15 crc kubenswrapper[4877]: I0128 16:58:15.626732 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ggcj2"
Jan 28 16:58:16 crc kubenswrapper[4877]: I0128 16:58:16.647503 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ab30-account-create-update-k22df" event={"ID":"49096c30-9fbe-45ef-8cb5-1808efde086b","Type":"ContainerStarted","Data":"8000a63f1cb034db07b6545130e0d5fc12c64de9cf3fb2fd9984c8a6eea75d2b"}
Jan 28 16:58:16 crc kubenswrapper[4877]: I0128 16:58:16.649551 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-adb0-account-create-update-rgtpf" event={"ID":"9895a837-fc16-4072-9fb8-9b79cb56b53b","Type":"ContainerStarted","Data":"62d6f23fa596fad2bdb306188ca1e9b4614a956a602f23075b1319f3b40f752f"}
Jan 28 16:58:16 crc kubenswrapper[4877]: I0128 16:58:16.651877 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-eeda-account-create-update-pmwp8" event={"ID":"cd70e089-7495-454b-8f44-eddda03fd848","Type":"ContainerStarted","Data":"258d9070cf54905c414efaf45be6180e8c26a8d40f942989face354b81e61ddd"}
Jan 28 16:58:16 crc kubenswrapper[4877]: I0128 16:58:16.655577 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"36256bdd-5ada-4651-8944-ed4c8978ed2c","Type":"ContainerStarted","Data":"f8d8b88d5022a7165f7254dc48c17aeaf1910fdac484ecbe5086133c39685ab5"}
Jan 28 16:58:16 crc kubenswrapper[4877]: I0128 16:58:16.658088 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-hjfls" event={"ID":"2117f641-020b-4fbc-b813-e56dab47f1c6","Type":"ContainerStarted","Data":"21288ee2ae5e7d109aad960e485ec91fc7cc722aafb84c45a449320cd2592c1f"}
Jan 28 16:58:16 crc kubenswrapper[4877]: I0128 16:58:16.675426 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-p94jx" podStartSLOduration=13.675412283 podStartE2EDuration="13.675412283s" podCreationTimestamp="2026-01-28 16:58:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:16.674338765 +0000 UTC m=+1400.232665663" watchObservedRunningTime="2026-01-28 16:58:16.675412283 +0000 UTC m=+1400.233739171"
Jan 28 16:58:16 crc kubenswrapper[4877]: I0128 16:58:16.702122 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-95w8q" podStartSLOduration=13.702089488 podStartE2EDuration="13.702089488s" podCreationTimestamp="2026-01-28 16:58:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:16.689291635 +0000 UTC m=+1400.247618523" watchObservedRunningTime="2026-01-28 16:58:16.702089488 +0000 UTC m=+1400.260416376"
Jan 28 16:58:19 crc kubenswrapper[4877]: I0128 16:58:19.731448 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-ab30-account-create-update-k22df" podStartSLOduration=15.731418872 podStartE2EDuration="15.731418872s" podCreationTimestamp="2026-01-28 16:58:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:19.716824281 +0000 UTC m=+1403.275151209" watchObservedRunningTime="2026-01-28 16:58:19.731418872 +0000 UTC m=+1403.289745780"
Jan 28 16:58:19 crc kubenswrapper[4877]: I0128 16:58:19.750634 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-openstack-cell1-db-create-hjfls" podStartSLOduration=13.750604036 podStartE2EDuration="13.750604036s" podCreationTimestamp="2026-01-28 16:58:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:19.737172226 +0000 UTC m=+1403.295499114" watchObservedRunningTime="2026-01-28 16:58:19.750604036 +0000 UTC m=+1403.308930934"
Jan 28 16:58:19 crc kubenswrapper[4877]: I0128 16:58:19.758715 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-eeda-account-create-update-pmwp8" podStartSLOduration=13.758689613 podStartE2EDuration="13.758689613s" podCreationTimestamp="2026-01-28 16:58:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:19.752795764 +0000 UTC m=+1403.311122652" watchObservedRunningTime="2026-01-28 16:58:19.758689613 +0000 UTC m=+1403.317016511"
Jan 28 16:58:19 crc kubenswrapper[4877]: I0128 16:58:19.781588 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-adb0-account-create-update-rgtpf" podStartSLOduration=16.781567665 podStartE2EDuration="16.781567665s" podCreationTimestamp="2026-01-28 16:58:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:19.768543866 +0000 UTC m=+1403.326870794" watchObservedRunningTime="2026-01-28 16:58:19.781567665 +0000 UTC m=+1403.339894573"
Jan 28 16:58:20 crc kubenswrapper[4877]: I0128 16:58:20.738194 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-dhv7z" podStartSLOduration=16.738165322 podStartE2EDuration="16.738165322s" podCreationTimestamp="2026-01-28 16:58:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:20.730352893 +0000 UTC m=+1404.288679781" watchObservedRunningTime="2026-01-28 16:58:20.738165322 +0000 UTC m=+1404.296492210"
Jan 28 16:58:20 crc kubenswrapper[4877]: I0128 16:58:20.754794 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-20ab-account-create-update-j946m" podStartSLOduration=17.754772467 podStartE2EDuration="17.754772467s" podCreationTimestamp="2026-01-28 16:58:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:20.744047089 +0000 UTC m=+1404.302373977" watchObservedRunningTime="2026-01-28 16:58:20.754772467 +0000 UTC m=+1404.313099355"
Jan 28 16:58:20 crc kubenswrapper[4877]: I0128 16:58:20.833981 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="d96b5016-3ed4-4f98-8708-f69092894981" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused"
Jan 28 16:58:20 crc kubenswrapper[4877]: I0128 16:58:20.850552 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-1" podUID="2f642a61-430e-4dfc-b6b6-3ee68161eaf6" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused"
Jan 28 16:58:21 crc kubenswrapper[4877]: I0128 16:58:21.087043 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="951f6a86-2dbc-402b-bb10-9a16d347c697" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.128:5671: connect: connection refused"
Jan 28 16:58:21 crc kubenswrapper[4877]: I0128 16:58:21.274098 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="5d261b3a-c6f9-48bd-92de-b76d3821e778" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.131:5671: connect: connection refused"
Jan 28 16:58:21 crc kubenswrapper[4877]: I0128 16:58:21.725439 4877 generic.go:334] "Generic (PLEG): container finished" podID="ed43d723-2ac6-40d0-aaea-148be1ceb3a4" containerID="ff675c788d25e2dc6a8ff30a48039ea321025b0eaf0bff5337f4a4e3dd5732eb" exitCode=0
Jan 28 16:58:21 crc kubenswrapper[4877]: I0128 16:58:21.725513 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-95w8q" event={"ID":"ed43d723-2ac6-40d0-aaea-148be1ceb3a4","Type":"ContainerDied","Data":"ff675c788d25e2dc6a8ff30a48039ea321025b0eaf0bff5337f4a4e3dd5732eb"}
Jan 28 16:58:21 crc kubenswrapper[4877]: I0128 16:58:21.729884 4877 generic.go:334] "Generic (PLEG): container finished" podID="cd70e089-7495-454b-8f44-eddda03fd848" containerID="258d9070cf54905c414efaf45be6180e8c26a8d40f942989face354b81e61ddd" exitCode=0
Jan 28 16:58:21 crc kubenswrapper[4877]: I0128 16:58:21.729944 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-eeda-account-create-update-pmwp8" event={"ID":"cd70e089-7495-454b-8f44-eddda03fd848","Type":"ContainerDied","Data":"258d9070cf54905c414efaf45be6180e8c26a8d40f942989face354b81e61ddd"}
Jan 28 16:58:21 crc kubenswrapper[4877]: I0128 16:58:21.733276 4877 generic.go:334] "Generic (PLEG): container finished" podID="172466e0-ca09-497b-8356-3099ad380f3a" containerID="5133cbf756c0fbe72c5dedf2ead3cc26813b5eae707482cff2fd43ff854f3127" exitCode=0
Jan 28 16:58:21 crc kubenswrapper[4877]: I0128 16:58:21.733369 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-dhv7z" event={"ID":"172466e0-ca09-497b-8356-3099ad380f3a","Type":"ContainerDied","Data":"5133cbf756c0fbe72c5dedf2ead3cc26813b5eae707482cff2fd43ff854f3127"}
Jan 28 16:58:21 crc kubenswrapper[4877]: I0128 16:58:21.735544 4877 generic.go:334] "Generic (PLEG): container finished" podID="b7997d91-de54-4f22-aa26-34db8d4d0c48" containerID="b01a3faf5022f37e2d05c5373f278b936ce816b9887f8b193ab5db224f90a560" exitCode=0
Jan 28 16:58:21 crc kubenswrapper[4877]: I0128 16:58:21.735607 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-p94jx" event={"ID":"b7997d91-de54-4f22-aa26-34db8d4d0c48","Type":"ContainerDied","Data":"b01a3faf5022f37e2d05c5373f278b936ce816b9887f8b193ab5db224f90a560"}
Jan 28 16:58:21 crc kubenswrapper[4877]: I0128 16:58:21.738016 4877 generic.go:334] "Generic (PLEG): container finished" podID="2117f641-020b-4fbc-b813-e56dab47f1c6" containerID="21288ee2ae5e7d109aad960e485ec91fc7cc722aafb84c45a449320cd2592c1f" exitCode=0
Jan 28 16:58:21 crc kubenswrapper[4877]: I0128 16:58:21.738060 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-hjfls" event={"ID":"2117f641-020b-4fbc-b813-e56dab47f1c6","Type":"ContainerDied","Data":"21288ee2ae5e7d109aad960e485ec91fc7cc722aafb84c45a449320cd2592c1f"}
Jan 28 16:58:22 crc kubenswrapper[4877]: I0128 16:58:22.765707 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"03f31376-451f-4360-bea2-4d4e557568f0","Type":"ContainerStarted","Data":"f3426ac731d17ead932f3a20a6d5dcc1f9857a433d539b8f32b449ae1b875d8b"}
Jan 28 16:58:22 crc kubenswrapper[4877]: I0128 16:58:22.766336 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"03f31376-451f-4360-bea2-4d4e557568f0","Type":"ContainerStarted","Data":"d151705081325ecbc2c434f55d148c8beadca2c8addd08a09128114a201ff182"}
Jan 28 16:58:22 crc kubenswrapper[4877]: I0128 16:58:22.768955 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"36256bdd-5ada-4651-8944-ed4c8978ed2c","Type":"ContainerStarted","Data":"f75eb898b0085ad22f7310c9505b7c2e8609e44f67bcc39d588a63f3a2c9038b"}
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.260749 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-hjfls"
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.378758 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2117f641-020b-4fbc-b813-e56dab47f1c6-operator-scripts\") pod \"2117f641-020b-4fbc-b813-e56dab47f1c6\" (UID: \"2117f641-020b-4fbc-b813-e56dab47f1c6\") "
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.379086 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rx7l\" (UniqueName: \"kubernetes.io/projected/2117f641-020b-4fbc-b813-e56dab47f1c6-kube-api-access-4rx7l\") pod \"2117f641-020b-4fbc-b813-e56dab47f1c6\" (UID: \"2117f641-020b-4fbc-b813-e56dab47f1c6\") "
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.379967 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2117f641-020b-4fbc-b813-e56dab47f1c6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2117f641-020b-4fbc-b813-e56dab47f1c6" (UID: "2117f641-020b-4fbc-b813-e56dab47f1c6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.391388 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2117f641-020b-4fbc-b813-e56dab47f1c6-kube-api-access-4rx7l" (OuterVolumeSpecName: "kube-api-access-4rx7l") pod "2117f641-020b-4fbc-b813-e56dab47f1c6" (UID: "2117f641-020b-4fbc-b813-e56dab47f1c6"). InnerVolumeSpecName "kube-api-access-4rx7l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.483094 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2117f641-020b-4fbc-b813-e56dab47f1c6-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.483134 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rx7l\" (UniqueName: \"kubernetes.io/projected/2117f641-020b-4fbc-b813-e56dab47f1c6-kube-api-access-4rx7l\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.538073 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-dhv7z"
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.556939 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-95w8q"
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.583487 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-p94jx"
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.588985 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-eeda-account-create-update-pmwp8"
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.686987 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/172466e0-ca09-497b-8356-3099ad380f3a-operator-scripts\") pod \"172466e0-ca09-497b-8356-3099ad380f3a\" (UID: \"172466e0-ca09-497b-8356-3099ad380f3a\") "
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.687359 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h62dh\" (UniqueName: \"kubernetes.io/projected/172466e0-ca09-497b-8356-3099ad380f3a-kube-api-access-h62dh\") pod \"172466e0-ca09-497b-8356-3099ad380f3a\" (UID: \"172466e0-ca09-497b-8356-3099ad380f3a\") "
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.687452 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4qs6\" (UniqueName: \"kubernetes.io/projected/b7997d91-de54-4f22-aa26-34db8d4d0c48-kube-api-access-q4qs6\") pod \"b7997d91-de54-4f22-aa26-34db8d4d0c48\" (UID: \"b7997d91-de54-4f22-aa26-34db8d4d0c48\") "
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.687575 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7997d91-de54-4f22-aa26-34db8d4d0c48-operator-scripts\") pod \"b7997d91-de54-4f22-aa26-34db8d4d0c48\" (UID: \"b7997d91-de54-4f22-aa26-34db8d4d0c48\") "
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.687656 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd70e089-7495-454b-8f44-eddda03fd848-operator-scripts\") pod \"cd70e089-7495-454b-8f44-eddda03fd848\" (UID: \"cd70e089-7495-454b-8f44-eddda03fd848\") "
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.687731 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pl8qj\" (UniqueName: \"kubernetes.io/projected/cd70e089-7495-454b-8f44-eddda03fd848-kube-api-access-pl8qj\") pod \"cd70e089-7495-454b-8f44-eddda03fd848\" (UID: \"cd70e089-7495-454b-8f44-eddda03fd848\") "
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.687866 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r7z6t\" (UniqueName: \"kubernetes.io/projected/ed43d723-2ac6-40d0-aaea-148be1ceb3a4-kube-api-access-r7z6t\") pod \"ed43d723-2ac6-40d0-aaea-148be1ceb3a4\" (UID: \"ed43d723-2ac6-40d0-aaea-148be1ceb3a4\") "
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.688044 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed43d723-2ac6-40d0-aaea-148be1ceb3a4-operator-scripts\") pod \"ed43d723-2ac6-40d0-aaea-148be1ceb3a4\" (UID: \"ed43d723-2ac6-40d0-aaea-148be1ceb3a4\") "
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.688018 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/172466e0-ca09-497b-8356-3099ad380f3a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "172466e0-ca09-497b-8356-3099ad380f3a" (UID: "172466e0-ca09-497b-8356-3099ad380f3a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.688583 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7997d91-de54-4f22-aa26-34db8d4d0c48-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b7997d91-de54-4f22-aa26-34db8d4d0c48" (UID: "b7997d91-de54-4f22-aa26-34db8d4d0c48"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.689324 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed43d723-2ac6-40d0-aaea-148be1ceb3a4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ed43d723-2ac6-40d0-aaea-148be1ceb3a4" (UID: "ed43d723-2ac6-40d0-aaea-148be1ceb3a4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.689855 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd70e089-7495-454b-8f44-eddda03fd848-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cd70e089-7495-454b-8f44-eddda03fd848" (UID: "cd70e089-7495-454b-8f44-eddda03fd848"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.707679 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7997d91-de54-4f22-aa26-34db8d4d0c48-kube-api-access-q4qs6" (OuterVolumeSpecName: "kube-api-access-q4qs6") pod "b7997d91-de54-4f22-aa26-34db8d4d0c48" (UID: "b7997d91-de54-4f22-aa26-34db8d4d0c48"). InnerVolumeSpecName "kube-api-access-q4qs6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.707794 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed43d723-2ac6-40d0-aaea-148be1ceb3a4-kube-api-access-r7z6t" (OuterVolumeSpecName: "kube-api-access-r7z6t") pod "ed43d723-2ac6-40d0-aaea-148be1ceb3a4" (UID: "ed43d723-2ac6-40d0-aaea-148be1ceb3a4"). InnerVolumeSpecName "kube-api-access-r7z6t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.715878 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70e089-7495-454b-8f44-eddda03fd848-kube-api-access-pl8qj" (OuterVolumeSpecName: "kube-api-access-pl8qj") pod "cd70e089-7495-454b-8f44-eddda03fd848" (UID: "cd70e089-7495-454b-8f44-eddda03fd848"). InnerVolumeSpecName "kube-api-access-pl8qj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.729020 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/172466e0-ca09-497b-8356-3099ad380f3a-kube-api-access-h62dh" (OuterVolumeSpecName: "kube-api-access-h62dh") pod "172466e0-ca09-497b-8356-3099ad380f3a" (UID: "172466e0-ca09-497b-8356-3099ad380f3a"). InnerVolumeSpecName "kube-api-access-h62dh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.786778 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-p94jx" event={"ID":"b7997d91-de54-4f22-aa26-34db8d4d0c48","Type":"ContainerDied","Data":"7dc8d33c5340f0f05492671f3fbe5a802e76a31028b88b782fbada303e05d6d1"}
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.786825 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7dc8d33c5340f0f05492671f3fbe5a802e76a31028b88b782fbada303e05d6d1"
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.786895 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-p94jx"
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.791890 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/172466e0-ca09-497b-8356-3099ad380f3a-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.792431 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h62dh\" (UniqueName: \"kubernetes.io/projected/172466e0-ca09-497b-8356-3099ad380f3a-kube-api-access-h62dh\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.792447 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4qs6\" (UniqueName: \"kubernetes.io/projected/b7997d91-de54-4f22-aa26-34db8d4d0c48-kube-api-access-q4qs6\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.792459 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7997d91-de54-4f22-aa26-34db8d4d0c48-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.792499 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd70e089-7495-454b-8f44-eddda03fd848-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.792516 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pl8qj\" (UniqueName: \"kubernetes.io/projected/cd70e089-7495-454b-8f44-eddda03fd848-kube-api-access-pl8qj\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.792533 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r7z6t\" (UniqueName: \"kubernetes.io/projected/ed43d723-2ac6-40d0-aaea-148be1ceb3a4-kube-api-access-r7z6t\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.792547 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed43d723-2ac6-40d0-aaea-148be1ceb3a4-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.799038 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-hjfls" event={"ID":"2117f641-020b-4fbc-b813-e56dab47f1c6","Type":"ContainerDied","Data":"82193784a1b53b3e6d8691cb7f5cc85d32506e948eda493a8402b62b8b6ffa1d"}
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.799103 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82193784a1b53b3e6d8691cb7f5cc85d32506e948eda493a8402b62b8b6ffa1d"
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.799196 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-hjfls"
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.804370 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-95w8q" event={"ID":"ed43d723-2ac6-40d0-aaea-148be1ceb3a4","Type":"ContainerDied","Data":"af9677cc58daaa44f69f48ca3e340f1128dd9ada9e8e500c41be3775dcfd6a59"}
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.804400 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-95w8q"
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.804432 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="af9677cc58daaa44f69f48ca3e340f1128dd9ada9e8e500c41be3775dcfd6a59"
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.855880 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"03f31376-451f-4360-bea2-4d4e557568f0","Type":"ContainerStarted","Data":"8c396e9d0ea4baac77d6eda0dbe34d2df4901b2482dc53fd906a9c95d3beed83"}
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.855944 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"03f31376-451f-4360-bea2-4d4e557568f0","Type":"ContainerStarted","Data":"a882951f5969ed284bbc84b2d2bd376983fa940e5060a1cd83ef2a81c8388032"}
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.886228 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-eeda-account-create-update-pmwp8"
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.887709 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-eeda-account-create-update-pmwp8" event={"ID":"cd70e089-7495-454b-8f44-eddda03fd848","Type":"ContainerDied","Data":"0d90615ce656e00b4888224488031642cafdfbde0262855ee388c23101eae1d0"}
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.887784 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d90615ce656e00b4888224488031642cafdfbde0262855ee388c23101eae1d0"
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.907700 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-dhv7z" event={"ID":"172466e0-ca09-497b-8356-3099ad380f3a","Type":"ContainerDied","Data":"c9e7ceede4a5d9fe08e5f78538771a931b9067a15db8d48eb4ce6f172b707f59"}
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.907743 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9e7ceede4a5d9fe08e5f78538771a931b9067a15db8d48eb4ce6f172b707f59"
Jan 28 16:58:23 crc kubenswrapper[4877]: I0128 16:58:23.907807 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-dhv7z"
Jan 28 16:58:25 crc kubenswrapper[4877]: I0128 16:58:25.943777 4877 generic.go:334] "Generic (PLEG): container finished" podID="49096c30-9fbe-45ef-8cb5-1808efde086b" containerID="8000a63f1cb034db07b6545130e0d5fc12c64de9cf3fb2fd9984c8a6eea75d2b" exitCode=0
Jan 28 16:58:25 crc kubenswrapper[4877]: I0128 16:58:25.944134 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ab30-account-create-update-k22df" event={"ID":"49096c30-9fbe-45ef-8cb5-1808efde086b","Type":"ContainerDied","Data":"8000a63f1cb034db07b6545130e0d5fc12c64de9cf3fb2fd9984c8a6eea75d2b"}
Jan 28 16:58:25 crc kubenswrapper[4877]: I0128 16:58:25.949843 4877 generic.go:334] "Generic (PLEG): container finished" podID="b7450209-21cd-4ad0-b700-080ff83306e1" containerID="ada214a4032c423787513923e28048ce45bcac98352a8f888a2adf6e2fe15c89" exitCode=0
Jan 28 16:58:25 crc kubenswrapper[4877]: I0128 16:58:25.949945 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-20ab-account-create-update-j946m" event={"ID":"b7450209-21cd-4ad0-b700-080ff83306e1","Type":"ContainerDied","Data":"ada214a4032c423787513923e28048ce45bcac98352a8f888a2adf6e2fe15c89"}
Jan 28 16:58:25 crc kubenswrapper[4877]: I0128 16:58:25.955348 4877 generic.go:334] "Generic (PLEG): container finished" podID="9895a837-fc16-4072-9fb8-9b79cb56b53b" containerID="62d6f23fa596fad2bdb306188ca1e9b4614a956a602f23075b1319f3b40f752f" exitCode=0
Jan 28 16:58:25 crc kubenswrapper[4877]: I0128 16:58:25.955452 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-adb0-account-create-update-rgtpf" event={"ID":"9895a837-fc16-4072-9fb8-9b79cb56b53b","Type":"ContainerDied","Data":"62d6f23fa596fad2bdb306188ca1e9b4614a956a602f23075b1319f3b40f752f"}
Jan 28 16:58:25 crc kubenswrapper[4877]: I0128 16:58:25.962555 4877 generic.go:334] "Generic (PLEG): container finished" podID="7ff226f3-96f3-413f-8f05-57e149367490" containerID="7efacec95ed943cd67ca1e09e8454f8725b4b6d7395604886f42e58a36c4a64c" exitCode=0
Jan 28 16:58:25 crc kubenswrapper[4877]: I0128 16:58:25.962618 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-85w55" event={"ID":"7ff226f3-96f3-413f-8f05-57e149367490","Type":"ContainerDied","Data":"7efacec95ed943cd67ca1e09e8454f8725b4b6d7395604886f42e58a36c4a64c"}
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.055517 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"]
Jan 28 16:58:27 crc kubenswrapper[4877]: E0128 16:58:27.056653 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7997d91-de54-4f22-aa26-34db8d4d0c48" containerName="mariadb-database-create"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.056674 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7997d91-de54-4f22-aa26-34db8d4d0c48" containerName="mariadb-database-create"
Jan 28 16:58:27 crc kubenswrapper[4877]: E0128 16:58:27.056689 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="172466e0-ca09-497b-8356-3099ad380f3a" containerName="mariadb-database-create"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.056696 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="172466e0-ca09-497b-8356-3099ad380f3a" containerName="mariadb-database-create"
Jan 28 16:58:27 crc kubenswrapper[4877]: E0128 16:58:27.056722 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed43d723-2ac6-40d0-aaea-148be1ceb3a4" containerName="mariadb-database-create"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.056733 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed43d723-2ac6-40d0-aaea-148be1ceb3a4" containerName="mariadb-database-create"
Jan 28 16:58:27 crc kubenswrapper[4877]: E0128 16:58:27.056758 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2117f641-020b-4fbc-b813-e56dab47f1c6" containerName="mariadb-database-create"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.056766 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="2117f641-020b-4fbc-b813-e56dab47f1c6" containerName="mariadb-database-create"
Jan 28 16:58:27 crc kubenswrapper[4877]: E0128 16:58:27.056792 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd70e089-7495-454b-8f44-eddda03fd848" containerName="mariadb-account-create-update"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.056800 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd70e089-7495-454b-8f44-eddda03fd848" containerName="mariadb-account-create-update"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.057022 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed43d723-2ac6-40d0-aaea-148be1ceb3a4" containerName="mariadb-database-create"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.057039 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="172466e0-ca09-497b-8356-3099ad380f3a" containerName="mariadb-database-create"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.057057 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="2117f641-020b-4fbc-b813-e56dab47f1c6" containerName="mariadb-database-create"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.057083 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd70e089-7495-454b-8f44-eddda03fd848" containerName="mariadb-account-create-update"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.057098 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7997d91-de54-4f22-aa26-34db8d4d0c48" containerName="mariadb-database-create"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.058099 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.062164 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.069688 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"]
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.197283 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"a40862f9-9799-4bd4-9e3f-9d528cf5f50e\") " pod="openstack/mysqld-exporter-0"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.197374 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85h2c\" (UniqueName: \"kubernetes.io/projected/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-kube-api-access-85h2c\") pod \"mysqld-exporter-0\" (UID: \"a40862f9-9799-4bd4-9e3f-9d528cf5f50e\") " pod="openstack/mysqld-exporter-0"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.197571 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-config-data\") pod \"mysqld-exporter-0\" (UID: \"a40862f9-9799-4bd4-9e3f-9d528cf5f50e\") " pod="openstack/mysqld-exporter-0"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.301173 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-config-data\") pod \"mysqld-exporter-0\" (UID: \"a40862f9-9799-4bd4-9e3f-9d528cf5f50e\") " pod="openstack/mysqld-exporter-0"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.301916 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"a40862f9-9799-4bd4-9e3f-9d528cf5f50e\") " pod="openstack/mysqld-exporter-0"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.301984 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85h2c\" (UniqueName: \"kubernetes.io/projected/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-kube-api-access-85h2c\") pod \"mysqld-exporter-0\" (UID: \"a40862f9-9799-4bd4-9e3f-9d528cf5f50e\") " pod="openstack/mysqld-exporter-0"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.311863 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"a40862f9-9799-4bd4-9e3f-9d528cf5f50e\") " pod="openstack/mysqld-exporter-0"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.312340 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-config-data\") pod \"mysqld-exporter-0\" (UID: \"a40862f9-9799-4bd4-9e3f-9d528cf5f50e\") " pod="openstack/mysqld-exporter-0"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.330491 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85h2c\" (UniqueName: \"kubernetes.io/projected/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-kube-api-access-85h2c\") pod \"mysqld-exporter-0\" (UID: \"a40862f9-9799-4bd4-9e3f-9d528cf5f50e\") " pod="openstack/mysqld-exporter-0"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.465717 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Jan 28 16:58:27 crc kubenswrapper[4877]: I0128 16:58:27.906705 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-20ab-account-create-update-j946m"
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.058324 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8cs7\" (UniqueName: \"kubernetes.io/projected/b7450209-21cd-4ad0-b700-080ff83306e1-kube-api-access-h8cs7\") pod \"b7450209-21cd-4ad0-b700-080ff83306e1\" (UID: \"b7450209-21cd-4ad0-b700-080ff83306e1\") "
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.058875 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7450209-21cd-4ad0-b700-080ff83306e1-operator-scripts\") pod \"b7450209-21cd-4ad0-b700-080ff83306e1\" (UID: \"b7450209-21cd-4ad0-b700-080ff83306e1\") "
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.060548 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7450209-21cd-4ad0-b700-080ff83306e1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b7450209-21cd-4ad0-b700-080ff83306e1" (UID: "b7450209-21cd-4ad0-b700-080ff83306e1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.090706 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7450209-21cd-4ad0-b700-080ff83306e1-kube-api-access-h8cs7" (OuterVolumeSpecName: "kube-api-access-h8cs7") pod "b7450209-21cd-4ad0-b700-080ff83306e1" (UID: "b7450209-21cd-4ad0-b700-080ff83306e1"). InnerVolumeSpecName "kube-api-access-h8cs7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.148963 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"36256bdd-5ada-4651-8944-ed4c8978ed2c","Type":"ContainerStarted","Data":"3eda1fa15eaacb7af31c17d834c2201f36f138e9d474bb328aa5ca3da46a43e3"}
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.162038 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8cs7\" (UniqueName: \"kubernetes.io/projected/b7450209-21cd-4ad0-b700-080ff83306e1-kube-api-access-h8cs7\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.162084 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7450209-21cd-4ad0-b700-080ff83306e1-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.177173 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-20ab-account-create-update-j946m"
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.177976 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-20ab-account-create-update-j946m" event={"ID":"b7450209-21cd-4ad0-b700-080ff83306e1","Type":"ContainerDied","Data":"ede73bc28a4025127850be79f5ca8a5d60041c139d329714b2e2ce99b84ff19d"}
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.178013 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ede73bc28a4025127850be79f5ca8a5d60041c139d329714b2e2ce99b84ff19d"
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.186743 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.218453 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=4.9437899640000005 podStartE2EDuration="1m52.218424419s" podCreationTimestamp="2026-01-28 16:56:36 +0000 UTC" firstStartedPulling="2026-01-28 16:56:39.440395744 +0000 UTC m=+1302.998722632" lastFinishedPulling="2026-01-28 16:58:26.715030199 +0000 UTC m=+1410.273357087" observedRunningTime="2026-01-28 16:58:28.201003842 +0000 UTC m=+1411.759330730" watchObservedRunningTime="2026-01-28 16:58:28.218424419 +0000 UTC m=+1411.776751307"
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.234138 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"03f31376-451f-4360-bea2-4d4e557568f0","Type":"ContainerStarted","Data":"3ca13332c88a08375e0d5fcfe6ddae9372629e98bbfcbff58b207c102ccd1db8"}
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.234206 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"03f31376-451f-4360-bea2-4d4e557568f0","Type":"ContainerStarted","Data":"6466350f59eeee88b0f50394996b6a65566615d09bd036a8e7ef327077d0b6ad"}
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.418447 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-adb0-account-create-update-rgtpf"
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.429541 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-85w55"
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.456135 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-ab30-account-create-update-k22df"
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.591205 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fnlpx\" (UniqueName: \"kubernetes.io/projected/49096c30-9fbe-45ef-8cb5-1808efde086b-kube-api-access-fnlpx\") pod \"49096c30-9fbe-45ef-8cb5-1808efde086b\" (UID: \"49096c30-9fbe-45ef-8cb5-1808efde086b\") "
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.591280 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49096c30-9fbe-45ef-8cb5-1808efde086b-operator-scripts\") pod \"49096c30-9fbe-45ef-8cb5-1808efde086b\" (UID: \"49096c30-9fbe-45ef-8cb5-1808efde086b\") "
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.591540 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5jvhc\" (UniqueName: \"kubernetes.io/projected/7ff226f3-96f3-413f-8f05-57e149367490-kube-api-access-5jvhc\") pod \"7ff226f3-96f3-413f-8f05-57e149367490\" (UID: \"7ff226f3-96f3-413f-8f05-57e149367490\") "
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.591568 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ff226f3-96f3-413f-8f05-57e149367490-operator-scripts\") pod \"7ff226f3-96f3-413f-8f05-57e149367490\" (UID: \"7ff226f3-96f3-413f-8f05-57e149367490\") "
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.591647 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9895a837-fc16-4072-9fb8-9b79cb56b53b-operator-scripts\") pod \"9895a837-fc16-4072-9fb8-9b79cb56b53b\" (UID: \"9895a837-fc16-4072-9fb8-9b79cb56b53b\") "
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.591701 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nzzp\" (UniqueName: \"kubernetes.io/projected/9895a837-fc16-4072-9fb8-9b79cb56b53b-kube-api-access-5nzzp\") pod \"9895a837-fc16-4072-9fb8-9b79cb56b53b\" (UID: \"9895a837-fc16-4072-9fb8-9b79cb56b53b\") "
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.592286 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49096c30-9fbe-45ef-8cb5-1808efde086b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "49096c30-9fbe-45ef-8cb5-1808efde086b" (UID: "49096c30-9fbe-45ef-8cb5-1808efde086b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.593302 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49096c30-9fbe-45ef-8cb5-1808efde086b-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.596527 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49096c30-9fbe-45ef-8cb5-1808efde086b-kube-api-access-fnlpx" (OuterVolumeSpecName: "kube-api-access-fnlpx") pod "49096c30-9fbe-45ef-8cb5-1808efde086b" (UID: "49096c30-9fbe-45ef-8cb5-1808efde086b"). InnerVolumeSpecName "kube-api-access-fnlpx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.596759 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ff226f3-96f3-413f-8f05-57e149367490-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7ff226f3-96f3-413f-8f05-57e149367490" (UID: "7ff226f3-96f3-413f-8f05-57e149367490"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.598510 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9895a837-fc16-4072-9fb8-9b79cb56b53b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9895a837-fc16-4072-9fb8-9b79cb56b53b" (UID: "9895a837-fc16-4072-9fb8-9b79cb56b53b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.602886 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ff226f3-96f3-413f-8f05-57e149367490-kube-api-access-5jvhc" (OuterVolumeSpecName: "kube-api-access-5jvhc") pod "7ff226f3-96f3-413f-8f05-57e149367490" (UID: "7ff226f3-96f3-413f-8f05-57e149367490"). InnerVolumeSpecName "kube-api-access-5jvhc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.603134 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9895a837-fc16-4072-9fb8-9b79cb56b53b-kube-api-access-5nzzp" (OuterVolumeSpecName: "kube-api-access-5nzzp") pod "9895a837-fc16-4072-9fb8-9b79cb56b53b" (UID: "9895a837-fc16-4072-9fb8-9b79cb56b53b"). InnerVolumeSpecName "kube-api-access-5nzzp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.701073 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ff226f3-96f3-413f-8f05-57e149367490-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.701119 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9895a837-fc16-4072-9fb8-9b79cb56b53b-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.701133 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nzzp\" (UniqueName: \"kubernetes.io/projected/9895a837-fc16-4072-9fb8-9b79cb56b53b-kube-api-access-5nzzp\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.701150 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fnlpx\" (UniqueName: \"kubernetes.io/projected/49096c30-9fbe-45ef-8cb5-1808efde086b-kube-api-access-fnlpx\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.701163 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5jvhc\" (UniqueName: \"kubernetes.io/projected/7ff226f3-96f3-413f-8f05-57e149367490-kube-api-access-5jvhc\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:28 crc kubenswrapper[4877]: I0128 16:58:28.721792 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"]
Jan 28 16:58:29 crc kubenswrapper[4877]: I0128 16:58:29.247802 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-85w55"
Jan 28 16:58:29 crc kubenswrapper[4877]: I0128 16:58:29.247698 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-85w55" event={"ID":"7ff226f3-96f3-413f-8f05-57e149367490","Type":"ContainerDied","Data":"e89f3f36948435caebba7c8aba73734cfe3502f569aa6b56038717937b9e3869"}
Jan 28 16:58:29 crc kubenswrapper[4877]: I0128 16:58:29.248225 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e89f3f36948435caebba7c8aba73734cfe3502f569aa6b56038717937b9e3869"
Jan 28 16:58:29 crc kubenswrapper[4877]: I0128 16:58:29.249375 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ab30-account-create-update-k22df" event={"ID":"49096c30-9fbe-45ef-8cb5-1808efde086b","Type":"ContainerDied","Data":"a859bd6bec9ebd9f1b93d4c981f8b18961ebf5e03eab312cc417f8ba2372be59"}
Jan 28 16:58:29 crc kubenswrapper[4877]: I0128 16:58:29.249402 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a859bd6bec9ebd9f1b93d4c981f8b18961ebf5e03eab312cc417f8ba2372be59"
Jan 28 16:58:29 crc kubenswrapper[4877]: I0128 16:58:29.249441 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-ab30-account-create-update-k22df"
Jan 28 16:58:29 crc kubenswrapper[4877]: I0128 16:58:29.251196 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-adb0-account-create-update-rgtpf" event={"ID":"9895a837-fc16-4072-9fb8-9b79cb56b53b","Type":"ContainerDied","Data":"f48eb40f7ee7f463f3e4e07fe6794d4a41379b4cbd4e0e1db3338cdeb77afa29"}
Jan 28 16:58:29 crc kubenswrapper[4877]: I0128 16:58:29.251237 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f48eb40f7ee7f463f3e4e07fe6794d4a41379b4cbd4e0e1db3338cdeb77afa29"
Jan 28 16:58:29 crc kubenswrapper[4877]: I0128 16:58:29.251234 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-adb0-account-create-update-rgtpf"
Jan 28 16:58:29 crc kubenswrapper[4877]: I0128 16:58:29.256246 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"03f31376-451f-4360-bea2-4d4e557568f0","Type":"ContainerStarted","Data":"75c60e2f42cdd9b4adc0f28c460019704cb6fa2a7004a79aeedccb6a09937087"}
Jan 28 16:58:29 crc kubenswrapper[4877]: I0128 16:58:29.256299 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"03f31376-451f-4360-bea2-4d4e557568f0","Type":"ContainerStarted","Data":"b24a8a012fee1c4ec0a02a78a00fd07ac372d78e5a83d97fbf914d7f5b25d952"}
Jan 28 16:58:29 crc kubenswrapper[4877]: I0128 16:58:29.259006 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"a40862f9-9799-4bd4-9e3f-9d528cf5f50e","Type":"ContainerStarted","Data":"9f376dfe1c648c1fe27dabd49a4837f468b13a2ee0f4605d055378af07fdd5bb"}
Jan 28 16:58:30 crc kubenswrapper[4877]: I0128 16:58:30.835807 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-2"
Jan 28 16:58:30 crc kubenswrapper[4877]: I0128 16:58:30.869708 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-1"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.096550 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.275844 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.634442 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-f8bsb"]
Jan 28 16:58:31 crc kubenswrapper[4877]: E0128 16:58:31.635110 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ff226f3-96f3-413f-8f05-57e149367490" containerName="mariadb-account-create-update"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.635137 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ff226f3-96f3-413f-8f05-57e149367490" containerName="mariadb-account-create-update"
Jan 28 16:58:31 crc kubenswrapper[4877]: E0128 16:58:31.635157 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49096c30-9fbe-45ef-8cb5-1808efde086b" containerName="mariadb-account-create-update"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.635166 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="49096c30-9fbe-45ef-8cb5-1808efde086b" containerName="mariadb-account-create-update"
Jan 28 16:58:31 crc kubenswrapper[4877]: E0128 16:58:31.635180 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7450209-21cd-4ad0-b700-080ff83306e1" containerName="mariadb-account-create-update"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.635192 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7450209-21cd-4ad0-b700-080ff83306e1" containerName="mariadb-account-create-update"
Jan 28 16:58:31 crc kubenswrapper[4877]: E0128 16:58:31.635223 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9895a837-fc16-4072-9fb8-9b79cb56b53b" containerName="mariadb-account-create-update"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.635230 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="9895a837-fc16-4072-9fb8-9b79cb56b53b" containerName="mariadb-account-create-update"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.635508 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="49096c30-9fbe-45ef-8cb5-1808efde086b" containerName="mariadb-account-create-update"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.635531 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="9895a837-fc16-4072-9fb8-9b79cb56b53b" containerName="mariadb-account-create-update"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.635552 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7450209-21cd-4ad0-b700-080ff83306e1" containerName="mariadb-account-create-update"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.635569 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ff226f3-96f3-413f-8f05-57e149367490" containerName="mariadb-account-create-update"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.636553 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-f8bsb"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.659538 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-f8bsb"]
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.698544 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-e6a7-account-create-update-f2jl2"]
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.700053 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-e6a7-account-create-update-f2jl2"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.705114 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.768355 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-e6a7-account-create-update-f2jl2"]
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.781624 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27ba5678-96ac-4505-8313-84cab2d57434-operator-scripts\") pod \"heat-db-create-f8bsb\" (UID: \"27ba5678-96ac-4505-8313-84cab2d57434\") " pod="openstack/heat-db-create-f8bsb"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.781915 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjwgd\" (UniqueName: \"kubernetes.io/projected/6aa96069-1d33-441b-85e7-64c2a06b3860-kube-api-access-qjwgd\") pod \"heat-e6a7-account-create-update-f2jl2\" (UID: \"6aa96069-1d33-441b-85e7-64c2a06b3860\") " pod="openstack/heat-e6a7-account-create-update-f2jl2"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.781990 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6aa96069-1d33-441b-85e7-64c2a06b3860-operator-scripts\") pod \"heat-e6a7-account-create-update-f2jl2\" (UID: \"6aa96069-1d33-441b-85e7-64c2a06b3860\") " pod="openstack/heat-e6a7-account-create-update-f2jl2"
Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.782067 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wq78p\" (UniqueName: \"kubernetes.io/projected/27ba5678-96ac-4505-8313-84cab2d57434-kube-api-access-wq78p\") pod \"heat-db-create-f8bsb\" (UID: \"27ba5678-96ac-4505-8313-84cab2d57434\") " pod="openstack/heat-db-create-f8bsb"
Jan 28
16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.888961 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qjwgd\" (UniqueName: \"kubernetes.io/projected/6aa96069-1d33-441b-85e7-64c2a06b3860-kube-api-access-qjwgd\") pod \"heat-e6a7-account-create-update-f2jl2\" (UID: \"6aa96069-1d33-441b-85e7-64c2a06b3860\") " pod="openstack/heat-e6a7-account-create-update-f2jl2" Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.889031 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6aa96069-1d33-441b-85e7-64c2a06b3860-operator-scripts\") pod \"heat-e6a7-account-create-update-f2jl2\" (UID: \"6aa96069-1d33-441b-85e7-64c2a06b3860\") " pod="openstack/heat-e6a7-account-create-update-f2jl2" Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.889071 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wq78p\" (UniqueName: \"kubernetes.io/projected/27ba5678-96ac-4505-8313-84cab2d57434-kube-api-access-wq78p\") pod \"heat-db-create-f8bsb\" (UID: \"27ba5678-96ac-4505-8313-84cab2d57434\") " pod="openstack/heat-db-create-f8bsb" Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.889170 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27ba5678-96ac-4505-8313-84cab2d57434-operator-scripts\") pod \"heat-db-create-f8bsb\" (UID: \"27ba5678-96ac-4505-8313-84cab2d57434\") " pod="openstack/heat-db-create-f8bsb" Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.893355 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6aa96069-1d33-441b-85e7-64c2a06b3860-operator-scripts\") pod \"heat-e6a7-account-create-update-f2jl2\" (UID: \"6aa96069-1d33-441b-85e7-64c2a06b3860\") " pod="openstack/heat-e6a7-account-create-update-f2jl2" Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.893372 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-mxsg9"] Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.895662 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-mxsg9" Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.899161 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27ba5678-96ac-4505-8313-84cab2d57434-operator-scripts\") pod \"heat-db-create-f8bsb\" (UID: \"27ba5678-96ac-4505-8313-84cab2d57434\") " pod="openstack/heat-db-create-f8bsb" Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.947034 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qjwgd\" (UniqueName: \"kubernetes.io/projected/6aa96069-1d33-441b-85e7-64c2a06b3860-kube-api-access-qjwgd\") pod \"heat-e6a7-account-create-update-f2jl2\" (UID: \"6aa96069-1d33-441b-85e7-64c2a06b3860\") " pod="openstack/heat-e6a7-account-create-update-f2jl2" Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.965413 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wq78p\" (UniqueName: \"kubernetes.io/projected/27ba5678-96ac-4505-8313-84cab2d57434-kube-api-access-wq78p\") pod \"heat-db-create-f8bsb\" (UID: \"27ba5678-96ac-4505-8313-84cab2d57434\") " pod="openstack/heat-db-create-f8bsb" Jan 28 16:58:31 crc kubenswrapper[4877]: I0128 16:58:31.982262 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-mxsg9"] Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.009671 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-qg5jt"] Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.011261 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-qg5jt" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.048190 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-qg5jt"] Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.048894 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-f8bsb" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.101319 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvf9w\" (UniqueName: \"kubernetes.io/projected/7f0118a1-6a58-42e3-b8af-bbc0bbd6777d-kube-api-access-zvf9w\") pod \"cinder-db-create-mxsg9\" (UID: \"7f0118a1-6a58-42e3-b8af-bbc0bbd6777d\") " pod="openstack/cinder-db-create-mxsg9" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.101417 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f0118a1-6a58-42e3-b8af-bbc0bbd6777d-operator-scripts\") pod \"cinder-db-create-mxsg9\" (UID: \"7f0118a1-6a58-42e3-b8af-bbc0bbd6777d\") " pod="openstack/cinder-db-create-mxsg9" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.102259 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-e6a7-account-create-update-f2jl2" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.118852 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-d6a6-account-create-update-zc2m5"] Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.120995 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-d6a6-account-create-update-zc2m5" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.145188 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.167170 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-d6a6-account-create-update-zc2m5"] Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.185125 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-v5fwc"] Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.187443 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-v5fwc" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.212161 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvf9w\" (UniqueName: \"kubernetes.io/projected/7f0118a1-6a58-42e3-b8af-bbc0bbd6777d-kube-api-access-zvf9w\") pod \"cinder-db-create-mxsg9\" (UID: \"7f0118a1-6a58-42e3-b8af-bbc0bbd6777d\") " pod="openstack/cinder-db-create-mxsg9" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.217266 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f0118a1-6a58-42e3-b8af-bbc0bbd6777d-operator-scripts\") pod \"cinder-db-create-mxsg9\" (UID: \"7f0118a1-6a58-42e3-b8af-bbc0bbd6777d\") " pod="openstack/cinder-db-create-mxsg9" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.217348 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcsv8\" (UniqueName: \"kubernetes.io/projected/f33aa46e-bc47-4499-b263-7a0152340bc4-kube-api-access-lcsv8\") pod \"barbican-db-create-qg5jt\" (UID: \"f33aa46e-bc47-4499-b263-7a0152340bc4\") " pod="openstack/barbican-db-create-qg5jt" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.217398 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f33aa46e-bc47-4499-b263-7a0152340bc4-operator-scripts\") pod \"barbican-db-create-qg5jt\" (UID: \"f33aa46e-bc47-4499-b263-7a0152340bc4\") " pod="openstack/barbican-db-create-qg5jt" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.218294 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f0118a1-6a58-42e3-b8af-bbc0bbd6777d-operator-scripts\") pod \"cinder-db-create-mxsg9\" (UID: \"7f0118a1-6a58-42e3-b8af-bbc0bbd6777d\") " pod="openstack/cinder-db-create-mxsg9" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.263299 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-v5fwc"] Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.278135 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvf9w\" (UniqueName: \"kubernetes.io/projected/7f0118a1-6a58-42e3-b8af-bbc0bbd6777d-kube-api-access-zvf9w\") pod \"cinder-db-create-mxsg9\" (UID: \"7f0118a1-6a58-42e3-b8af-bbc0bbd6777d\") " pod="openstack/cinder-db-create-mxsg9" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.286742 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-d27d-account-create-update-vqv6l"] Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.296802 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-d27d-account-create-update-vqv6l" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.299618 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-d27d-account-create-update-vqv6l"] Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.305579 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.322998 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-526f-account-create-update-ngljr"] Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.323352 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b658b847-5c12-40f3-8602-a0897a18066f-operator-scripts\") pod \"neutron-db-create-v5fwc\" (UID: \"b658b847-5c12-40f3-8602-a0897a18066f\") " pod="openstack/neutron-db-create-v5fwc" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.323499 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhlmq\" (UniqueName: \"kubernetes.io/projected/d6c0346e-3510-4fa2-ac67-46bed9447c6b-kube-api-access-nhlmq\") pod \"cinder-d6a6-account-create-update-zc2m5\" (UID: \"d6c0346e-3510-4fa2-ac67-46bed9447c6b\") " pod="openstack/cinder-d6a6-account-create-update-zc2m5" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.323800 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6vjs\" (UniqueName: \"kubernetes.io/projected/b73dca88-cd81-4389-86cf-dd7973a4e489-kube-api-access-p6vjs\") pod \"barbican-d27d-account-create-update-vqv6l\" (UID: \"b73dca88-cd81-4389-86cf-dd7973a4e489\") " pod="openstack/barbican-d27d-account-create-update-vqv6l" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.323893 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d6c0346e-3510-4fa2-ac67-46bed9447c6b-operator-scripts\") pod \"cinder-d6a6-account-create-update-zc2m5\" (UID: \"d6c0346e-3510-4fa2-ac67-46bed9447c6b\") " pod="openstack/cinder-d6a6-account-create-update-zc2m5" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.323954 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b73dca88-cd81-4389-86cf-dd7973a4e489-operator-scripts\") pod \"barbican-d27d-account-create-update-vqv6l\" (UID: \"b73dca88-cd81-4389-86cf-dd7973a4e489\") " pod="openstack/barbican-d27d-account-create-update-vqv6l" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.324000 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcsv8\" (UniqueName: \"kubernetes.io/projected/f33aa46e-bc47-4499-b263-7a0152340bc4-kube-api-access-lcsv8\") pod \"barbican-db-create-qg5jt\" (UID: \"f33aa46e-bc47-4499-b263-7a0152340bc4\") " pod="openstack/barbican-db-create-qg5jt" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.324039 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f33aa46e-bc47-4499-b263-7a0152340bc4-operator-scripts\") pod \"barbican-db-create-qg5jt\" (UID: \"f33aa46e-bc47-4499-b263-7a0152340bc4\") " pod="openstack/barbican-db-create-qg5jt" Jan 28 
16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.324121 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glwjb\" (UniqueName: \"kubernetes.io/projected/b658b847-5c12-40f3-8602-a0897a18066f-kube-api-access-glwjb\") pod \"neutron-db-create-v5fwc\" (UID: \"b658b847-5c12-40f3-8602-a0897a18066f\") " pod="openstack/neutron-db-create-v5fwc" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.325127 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-526f-account-create-update-ngljr" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.333232 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f33aa46e-bc47-4499-b263-7a0152340bc4-operator-scripts\") pod \"barbican-db-create-qg5jt\" (UID: \"f33aa46e-bc47-4499-b263-7a0152340bc4\") " pod="openstack/barbican-db-create-qg5jt" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.333284 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.335752 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-526f-account-create-update-ngljr"] Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.365781 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcsv8\" (UniqueName: \"kubernetes.io/projected/f33aa46e-bc47-4499-b263-7a0152340bc4-kube-api-access-lcsv8\") pod \"barbican-db-create-qg5jt\" (UID: \"f33aa46e-bc47-4499-b263-7a0152340bc4\") " pod="openstack/barbican-db-create-qg5jt" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.371763 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-mxsg9" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.377302 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"03f31376-451f-4360-bea2-4d4e557568f0","Type":"ContainerStarted","Data":"796b0265a57d4afbb36444e540ce1088143c058c302a42647f0b902643b33637"} Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.397652 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"a40862f9-9799-4bd4-9e3f-9d528cf5f50e","Type":"ContainerStarted","Data":"94f9545ebcef02a0b88178061a93c8b89f464c7ce525b689263733d646a36d7b"} Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.428345 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6vjs\" (UniqueName: \"kubernetes.io/projected/b73dca88-cd81-4389-86cf-dd7973a4e489-kube-api-access-p6vjs\") pod \"barbican-d27d-account-create-update-vqv6l\" (UID: \"b73dca88-cd81-4389-86cf-dd7973a4e489\") " pod="openstack/barbican-d27d-account-create-update-vqv6l" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.428448 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d6c0346e-3510-4fa2-ac67-46bed9447c6b-operator-scripts\") pod \"cinder-d6a6-account-create-update-zc2m5\" (UID: \"d6c0346e-3510-4fa2-ac67-46bed9447c6b\") " pod="openstack/cinder-d6a6-account-create-update-zc2m5" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.428517 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b73dca88-cd81-4389-86cf-dd7973a4e489-operator-scripts\") pod \"barbican-d27d-account-create-update-vqv6l\" (UID: \"b73dca88-cd81-4389-86cf-dd7973a4e489\") " pod="openstack/barbican-d27d-account-create-update-vqv6l" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.428589 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glwjb\" (UniqueName: \"kubernetes.io/projected/b658b847-5c12-40f3-8602-a0897a18066f-kube-api-access-glwjb\") pod \"neutron-db-create-v5fwc\" (UID: \"b658b847-5c12-40f3-8602-a0897a18066f\") " pod="openstack/neutron-db-create-v5fwc" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.428628 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b658b847-5c12-40f3-8602-a0897a18066f-operator-scripts\") pod \"neutron-db-create-v5fwc\" (UID: \"b658b847-5c12-40f3-8602-a0897a18066f\") " pod="openstack/neutron-db-create-v5fwc" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.428673 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhlmq\" (UniqueName: \"kubernetes.io/projected/d6c0346e-3510-4fa2-ac67-46bed9447c6b-kube-api-access-nhlmq\") pod \"cinder-d6a6-account-create-update-zc2m5\" (UID: \"d6c0346e-3510-4fa2-ac67-46bed9447c6b\") " pod="openstack/cinder-d6a6-account-create-update-zc2m5" Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.430077 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d6c0346e-3510-4fa2-ac67-46bed9447c6b-operator-scripts\") pod \"cinder-d6a6-account-create-update-zc2m5\" (UID: \"d6c0346e-3510-4fa2-ac67-46bed9447c6b\") " pod="openstack/cinder-d6a6-account-create-update-zc2m5" Jan 28 
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.430070 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b73dca88-cd81-4389-86cf-dd7973a4e489-operator-scripts\") pod \"barbican-d27d-account-create-update-vqv6l\" (UID: \"b73dca88-cd81-4389-86cf-dd7973a4e489\") " pod="openstack/barbican-d27d-account-create-update-vqv6l"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.430659 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b658b847-5c12-40f3-8602-a0897a18066f-operator-scripts\") pod \"neutron-db-create-v5fwc\" (UID: \"b658b847-5c12-40f3-8602-a0897a18066f\") " pod="openstack/neutron-db-create-v5fwc"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.463300 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhlmq\" (UniqueName: \"kubernetes.io/projected/d6c0346e-3510-4fa2-ac67-46bed9447c6b-kube-api-access-nhlmq\") pod \"cinder-d6a6-account-create-update-zc2m5\" (UID: \"d6c0346e-3510-4fa2-ac67-46bed9447c6b\") " pod="openstack/cinder-d6a6-account-create-update-zc2m5"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.470820 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glwjb\" (UniqueName: \"kubernetes.io/projected/b658b847-5c12-40f3-8602-a0897a18066f-kube-api-access-glwjb\") pod \"neutron-db-create-v5fwc\" (UID: \"b658b847-5c12-40f3-8602-a0897a18066f\") " pod="openstack/neutron-db-create-v5fwc"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.471251 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6vjs\" (UniqueName: \"kubernetes.io/projected/b73dca88-cd81-4389-86cf-dd7973a4e489-kube-api-access-p6vjs\") pod \"barbican-d27d-account-create-update-vqv6l\" (UID: \"b73dca88-cd81-4389-86cf-dd7973a4e489\") " pod="openstack/barbican-d27d-account-create-update-vqv6l"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.474263 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-qg5jt"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.483985 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=3.378328959 podStartE2EDuration="5.483959217s" podCreationTimestamp="2026-01-28 16:58:27 +0000 UTC" firstStartedPulling="2026-01-28 16:58:28.69424194 +0000 UTC m=+1412.252568848" lastFinishedPulling="2026-01-28 16:58:30.799872218 +0000 UTC m=+1414.358199106" observedRunningTime="2026-01-28 16:58:32.438710046 +0000 UTC m=+1415.997036954" watchObservedRunningTime="2026-01-28 16:58:32.483959217 +0000 UTC m=+1416.042286095"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.513879 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-85w55"]
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.521441 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-85w55"]
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.531226 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8be5d3d7-b0d0-47ba-906c-efe5b871f035-operator-scripts\") pod \"neutron-526f-account-create-update-ngljr\" (UID: \"8be5d3d7-b0d0-47ba-906c-efe5b871f035\") " pod="openstack/neutron-526f-account-create-update-ngljr"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.532202 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2w8ff\" (UniqueName: \"kubernetes.io/projected/8be5d3d7-b0d0-47ba-906c-efe5b871f035-kube-api-access-2w8ff\") pod \"neutron-526f-account-create-update-ngljr\" (UID: \"8be5d3d7-b0d0-47ba-906c-efe5b871f035\") " pod="openstack/neutron-526f-account-create-update-ngljr"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.587715 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d6a6-account-create-update-zc2m5"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.607557 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-v5fwc"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.634373 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2w8ff\" (UniqueName: \"kubernetes.io/projected/8be5d3d7-b0d0-47ba-906c-efe5b871f035-kube-api-access-2w8ff\") pod \"neutron-526f-account-create-update-ngljr\" (UID: \"8be5d3d7-b0d0-47ba-906c-efe5b871f035\") " pod="openstack/neutron-526f-account-create-update-ngljr"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.634955 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8be5d3d7-b0d0-47ba-906c-efe5b871f035-operator-scripts\") pod \"neutron-526f-account-create-update-ngljr\" (UID: \"8be5d3d7-b0d0-47ba-906c-efe5b871f035\") " pod="openstack/neutron-526f-account-create-update-ngljr"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.636126 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8be5d3d7-b0d0-47ba-906c-efe5b871f035-operator-scripts\") pod \"neutron-526f-account-create-update-ngljr\" (UID: \"8be5d3d7-b0d0-47ba-906c-efe5b871f035\") " pod="openstack/neutron-526f-account-create-update-ngljr"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.647702 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d27d-account-create-update-vqv6l"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.672836 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2w8ff\" (UniqueName: \"kubernetes.io/projected/8be5d3d7-b0d0-47ba-906c-efe5b871f035-kube-api-access-2w8ff\") pod \"neutron-526f-account-create-update-ngljr\" (UID: \"8be5d3d7-b0d0-47ba-906c-efe5b871f035\") " pod="openstack/neutron-526f-account-create-update-ngljr"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.676361 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-526f-account-create-update-ngljr"
Jan 28 16:58:32 crc kubenswrapper[4877]: I0128 16:58:32.939741 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-f8bsb"]
Jan 28 16:58:32 crc kubenswrapper[4877]: W0128 16:58:32.961830 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27ba5678_96ac_4505_8313_84cab2d57434.slice/crio-9e85d1959f3267dd6f9fe820ea17410b8ff240bd838ddad304a32ff33cc32366 WatchSource:0}: Error finding container 9e85d1959f3267dd6f9fe820ea17410b8ff240bd838ddad304a32ff33cc32366: Status 404 returned error can't find the container with id 9e85d1959f3267dd6f9fe820ea17410b8ff240bd838ddad304a32ff33cc32366
Jan 28 16:58:33 crc kubenswrapper[4877]: I0128 16:58:33.178849 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-e6a7-account-create-update-f2jl2"]
Jan 28 16:58:33 crc kubenswrapper[4877]: W0128 16:58:33.196075 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6aa96069_1d33_441b_85e7_64c2a06b3860.slice/crio-ee1a2f69ffa9e1ca24d169168c5800932dec9608e3576a57e683d676b883098a WatchSource:0}: Error finding container ee1a2f69ffa9e1ca24d169168c5800932dec9608e3576a57e683d676b883098a: Status 404 returned error can't find the container with id ee1a2f69ffa9e1ca24d169168c5800932dec9608e3576a57e683d676b883098a
Jan 28 16:58:33 crc kubenswrapper[4877]: I0128 16:58:33.345913 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ff226f3-96f3-413f-8f05-57e149367490" path="/var/lib/kubelet/pods/7ff226f3-96f3-413f-8f05-57e149367490/volumes"
Jan 28 16:58:33 crc kubenswrapper[4877]: I0128 16:58:33.426557 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"03f31376-451f-4360-bea2-4d4e557568f0","Type":"ContainerStarted","Data":"01f44d867cb911202f0a6efa22fd239930c5059f43fdd9eac899db5beab2b2be"}
Jan 28 16:58:33 crc kubenswrapper[4877]: I0128 16:58:33.431168 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-f8bsb" event={"ID":"27ba5678-96ac-4505-8313-84cab2d57434","Type":"ContainerStarted","Data":"9e85d1959f3267dd6f9fe820ea17410b8ff240bd838ddad304a32ff33cc32366"}
Jan 28 16:58:33 crc kubenswrapper[4877]: I0128 16:58:33.433202 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-e6a7-account-create-update-f2jl2" event={"ID":"6aa96069-1d33-441b-85e7-64c2a06b3860","Type":"ContainerStarted","Data":"ee1a2f69ffa9e1ca24d169168c5800932dec9608e3576a57e683d676b883098a"}
Jan 28 16:58:33 crc kubenswrapper[4877]: I0128 16:58:33.499962 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-mxsg9"]
Jan 28 16:58:33 crc kubenswrapper[4877]: I0128 16:58:33.829103 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-d6a6-account-create-update-zc2m5"]
Jan 28 16:58:33 crc kubenswrapper[4877]: W0128 16:58:33.870028 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6c0346e_3510_4fa2_ac67_46bed9447c6b.slice/crio-31aa7cf69fd88c90067ca17fa6d0cc0c01972d8eb8cf3c8b395c859ddc2597b8 WatchSource:0}: Error finding container 31aa7cf69fd88c90067ca17fa6d0cc0c01972d8eb8cf3c8b395c859ddc2597b8: Status 404 returned error can't find the container with id 31aa7cf69fd88c90067ca17fa6d0cc0c01972d8eb8cf3c8b395c859ddc2597b8
Jan 28 16:58:33 crc kubenswrapper[4877]: I0128 16:58:33.913840 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-d27d-account-create-update-vqv6l"]
Jan 28 16:58:33 crc kubenswrapper[4877]: W0128 16:58:33.928932 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb658b847_5c12_40f3_8602_a0897a18066f.slice/crio-aca1dfee07a0e9fb19628ea9fe04bc7dee739bacd447811d24a67c16e6330455 WatchSource:0}: Error finding container aca1dfee07a0e9fb19628ea9fe04bc7dee739bacd447811d24a67c16e6330455: Status 404 returned error can't find the container with id aca1dfee07a0e9fb19628ea9fe04bc7dee739bacd447811d24a67c16e6330455
Jan 28 16:58:33 crc kubenswrapper[4877]: I0128 16:58:33.949856 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-v5fwc"]
Jan 28 16:58:33 crc kubenswrapper[4877]: I0128 16:58:33.996805 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-qg5jt"]
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.017697 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-526f-account-create-update-ngljr"]
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.473710 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-qg5jt" event={"ID":"f33aa46e-bc47-4499-b263-7a0152340bc4","Type":"ContainerStarted","Data":"3f179b9a76e54e3166dae3268ac7b8e9152d7a51ae2d84d849a5458da9e97539"}
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.495060 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d27d-account-create-update-vqv6l" event={"ID":"b73dca88-cd81-4389-86cf-dd7973a4e489","Type":"ContainerStarted","Data":"3935cc187958c29f8d4b5dd3f7a759514ff1157a7f0293362b8fccc5e22d2d0c"}
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.507595 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-f8bsb" event={"ID":"27ba5678-96ac-4505-8313-84cab2d57434","Type":"ContainerStarted","Data":"337b44ba67d4d100a74e91e853b309c31b28a5925eea01a43b24d192e0a1790a"}
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.527178 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-mxsg9" event={"ID":"7f0118a1-6a58-42e3-b8af-bbc0bbd6777d","Type":"ContainerStarted","Data":"c3138c25344035be31cb31f1f3aea8098572e5eb4087600f89d10726f58a58d7"}
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.527235 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-mxsg9" event={"ID":"7f0118a1-6a58-42e3-b8af-bbc0bbd6777d","Type":"ContainerStarted","Data":"8d2bde655a093fda4b2515fca9a5169f87df0b2eb1ae7c0fbd7457d8343b51c1"}
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.549013 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-v5fwc" event={"ID":"b658b847-5c12-40f3-8602-a0897a18066f","Type":"ContainerStarted","Data":"ddce209fc785587cb17240da78fe293bc3162cc21168d9e0a6ffcba87c4db0cb"}
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.549074 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-v5fwc" event={"ID":"b658b847-5c12-40f3-8602-a0897a18066f","Type":"ContainerStarted","Data":"aca1dfee07a0e9fb19628ea9fe04bc7dee739bacd447811d24a67c16e6330455"}
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.550272 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-create-f8bsb" podStartSLOduration=3.550254881 podStartE2EDuration="3.550254881s" podCreationTimestamp="2026-01-28 16:58:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:34.534928361 +0000 UTC m=+1418.093255249" watchObservedRunningTime="2026-01-28 16:58:34.550254881 +0000 UTC m=+1418.108581769"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.581165 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d6a6-account-create-update-zc2m5" event={"ID":"d6c0346e-3510-4fa2-ac67-46bed9447c6b","Type":"ContainerStarted","Data":"3b09f7d0955c178d2f7cc818b6c262d984c7364ec0cdc0e57f632b977dd1613c"}
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.581219 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d6a6-account-create-update-zc2m5" event={"ID":"d6c0346e-3510-4fa2-ac67-46bed9447c6b","Type":"ContainerStarted","Data":"31aa7cf69fd88c90067ca17fa6d0cc0c01972d8eb8cf3c8b395c859ddc2597b8"}
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.597333 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-mxsg9" podStartSLOduration=3.597305971 podStartE2EDuration="3.597305971s" podCreationTimestamp="2026-01-28 16:58:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:34.575120858 +0000 UTC m=+1418.133447746" watchObservedRunningTime="2026-01-28 16:58:34.597305971 +0000 UTC m=+1418.155632859"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.599857 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-526f-account-create-update-ngljr" event={"ID":"8be5d3d7-b0d0-47ba-906c-efe5b871f035","Type":"ContainerStarted","Data":"d86e45521b97f6dba47ad291e7d55de0cdce9016a03778f70bb153cbaf614bc1"}
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.615299 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-e6a7-account-create-update-f2jl2" event={"ID":"6aa96069-1d33-441b-85e7-64c2a06b3860","Type":"ContainerStarted","Data":"fbd4fe8af9b9adc070ca706800ff14bed5a29c3c5d41b226d95526ec439ed16d"}
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.682787 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-lptvh"]
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.684292 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lptvh"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.695627 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-v5fwc" podStartSLOduration=2.695596274 podStartE2EDuration="2.695596274s" podCreationTimestamp="2026-01-28 16:58:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:34.626114613 +0000 UTC m=+1418.184441501" watchObservedRunningTime="2026-01-28 16:58:34.695596274 +0000 UTC m=+1418.253923192"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.696170 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.696754 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-jptj6"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.708950 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"03f31376-451f-4360-bea2-4d4e557568f0","Type":"ContainerStarted","Data":"566d4f8a87556335a45ba5e8f78b529b3fc6804c3a58bae83c441bdfeb18c764"}
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.747278 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-lptvh"]
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.748429 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-e6a7-account-create-update-f2jl2" podStartSLOduration=3.7484026569999997 podStartE2EDuration="3.748402657s" podCreationTimestamp="2026-01-28 16:58:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:34.662429185 +0000 UTC m=+1418.220756073" watchObservedRunningTime="2026-01-28 16:58:34.748402657 +0000 UTC m=+1418.306729545"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.768771 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-526f-account-create-update-ngljr" podStartSLOduration=2.768747242 podStartE2EDuration="2.768747242s" podCreationTimestamp="2026-01-28 16:58:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:34.696370114 +0000 UTC m=+1418.254697002" watchObservedRunningTime="2026-01-28 16:58:34.768747242 +0000 UTC m=+1418.327074130"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.792904 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-g4wc7"]
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.794629 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-g4wc7"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.796522 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-d6a6-account-create-update-zc2m5" podStartSLOduration=3.796501496 podStartE2EDuration="3.796501496s" podCreationTimestamp="2026-01-28 16:58:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:34.758692043 +0000 UTC m=+1418.317018931" watchObservedRunningTime="2026-01-28 16:58:34.796501496 +0000 UTC m=+1418.354828384"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.802960 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-45ggb"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.803103 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.803236 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.812721 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.818829 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-g4wc7"]
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.819726 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-db-sync-config-data\") pod \"glance-db-sync-lptvh\" (UID: \"534e973d-d29f-4aac-8922-5f42d27c0770\") " pod="openstack/glance-db-sync-lptvh"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.819819 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-combined-ca-bundle\") pod \"glance-db-sync-lptvh\" (UID: \"534e973d-d29f-4aac-8922-5f42d27c0770\") " pod="openstack/glance-db-sync-lptvh"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.819847 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zvpx\" (UniqueName: \"kubernetes.io/projected/534e973d-d29f-4aac-8922-5f42d27c0770-kube-api-access-9zvpx\") pod \"glance-db-sync-lptvh\" (UID: \"534e973d-d29f-4aac-8922-5f42d27c0770\") " pod="openstack/glance-db-sync-lptvh"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.819992 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-config-data\") pod \"glance-db-sync-lptvh\" (UID: \"534e973d-d29f-4aac-8922-5f42d27c0770\") " pod="openstack/glance-db-sync-lptvh"
Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.922811 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/424aed47-f19b-40f2-b8c1-dfc24b8d605d-config-data\") pod \"keystone-db-sync-g4wc7\" (UID: \"424aed47-f19b-40f2-b8c1-dfc24b8d605d\") " pod="openstack/keystone-db-sync-g4wc7"
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/424aed47-f19b-40f2-b8c1-dfc24b8d605d-combined-ca-bundle\") pod \"keystone-db-sync-g4wc7\" (UID: \"424aed47-f19b-40f2-b8c1-dfc24b8d605d\") " pod="openstack/keystone-db-sync-g4wc7" Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.923171 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-config-data\") pod \"glance-db-sync-lptvh\" (UID: \"534e973d-d29f-4aac-8922-5f42d27c0770\") " pod="openstack/glance-db-sync-lptvh" Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.923202 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cw9m\" (UniqueName: \"kubernetes.io/projected/424aed47-f19b-40f2-b8c1-dfc24b8d605d-kube-api-access-7cw9m\") pod \"keystone-db-sync-g4wc7\" (UID: \"424aed47-f19b-40f2-b8c1-dfc24b8d605d\") " pod="openstack/keystone-db-sync-g4wc7" Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.923247 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-db-sync-config-data\") pod \"glance-db-sync-lptvh\" (UID: \"534e973d-d29f-4aac-8922-5f42d27c0770\") " pod="openstack/glance-db-sync-lptvh" Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.923292 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-combined-ca-bundle\") pod \"glance-db-sync-lptvh\" (UID: \"534e973d-d29f-4aac-8922-5f42d27c0770\") " pod="openstack/glance-db-sync-lptvh" Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.923312 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zvpx\" (UniqueName: \"kubernetes.io/projected/534e973d-d29f-4aac-8922-5f42d27c0770-kube-api-access-9zvpx\") pod \"glance-db-sync-lptvh\" (UID: \"534e973d-d29f-4aac-8922-5f42d27c0770\") " pod="openstack/glance-db-sync-lptvh" Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.943871 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-combined-ca-bundle\") pod \"glance-db-sync-lptvh\" (UID: \"534e973d-d29f-4aac-8922-5f42d27c0770\") " pod="openstack/glance-db-sync-lptvh" Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.944053 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-config-data\") pod \"glance-db-sync-lptvh\" (UID: \"534e973d-d29f-4aac-8922-5f42d27c0770\") " pod="openstack/glance-db-sync-lptvh" Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.948029 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zvpx\" (UniqueName: \"kubernetes.io/projected/534e973d-d29f-4aac-8922-5f42d27c0770-kube-api-access-9zvpx\") pod \"glance-db-sync-lptvh\" (UID: \"534e973d-d29f-4aac-8922-5f42d27c0770\") " pod="openstack/glance-db-sync-lptvh" Jan 28 16:58:34 crc kubenswrapper[4877]: I0128 16:58:34.948354 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-db-sync-config-data\") pod \"glance-db-sync-lptvh\" (UID: \"534e973d-d29f-4aac-8922-5f42d27c0770\") " pod="openstack/glance-db-sync-lptvh" Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.025557 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cw9m\" (UniqueName: \"kubernetes.io/projected/424aed47-f19b-40f2-b8c1-dfc24b8d605d-kube-api-access-7cw9m\") pod \"keystone-db-sync-g4wc7\" (UID: \"424aed47-f19b-40f2-b8c1-dfc24b8d605d\") " pod="openstack/keystone-db-sync-g4wc7" Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.025725 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/424aed47-f19b-40f2-b8c1-dfc24b8d605d-config-data\") pod \"keystone-db-sync-g4wc7\" (UID: \"424aed47-f19b-40f2-b8c1-dfc24b8d605d\") " pod="openstack/keystone-db-sync-g4wc7" Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.025751 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/424aed47-f19b-40f2-b8c1-dfc24b8d605d-combined-ca-bundle\") pod \"keystone-db-sync-g4wc7\" (UID: \"424aed47-f19b-40f2-b8c1-dfc24b8d605d\") " pod="openstack/keystone-db-sync-g4wc7" Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.030550 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/424aed47-f19b-40f2-b8c1-dfc24b8d605d-combined-ca-bundle\") pod \"keystone-db-sync-g4wc7\" (UID: \"424aed47-f19b-40f2-b8c1-dfc24b8d605d\") " pod="openstack/keystone-db-sync-g4wc7" Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.032607 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/424aed47-f19b-40f2-b8c1-dfc24b8d605d-config-data\") pod \"keystone-db-sync-g4wc7\" (UID: \"424aed47-f19b-40f2-b8c1-dfc24b8d605d\") " pod="openstack/keystone-db-sync-g4wc7" Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.057638 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cw9m\" (UniqueName: \"kubernetes.io/projected/424aed47-f19b-40f2-b8c1-dfc24b8d605d-kube-api-access-7cw9m\") pod \"keystone-db-sync-g4wc7\" (UID: \"424aed47-f19b-40f2-b8c1-dfc24b8d605d\") " pod="openstack/keystone-db-sync-g4wc7" Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.062987 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lptvh" Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.217261 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-g4wc7" Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.738967 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d27d-account-create-update-vqv6l" event={"ID":"b73dca88-cd81-4389-86cf-dd7973a4e489","Type":"ContainerStarted","Data":"3e8d6e228c3d7e3bc8b5bae2522e027386953e5c58e9387d7140f5ee18f9962b"} Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.748150 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-526f-account-create-update-ngljr" event={"ID":"8be5d3d7-b0d0-47ba-906c-efe5b871f035","Type":"ContainerStarted","Data":"5d36b2a06fe50bbb399287c88c54c7a07f7826a44098f1b106dee87fb3fa968f"} Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.750491 4877 generic.go:334] "Generic (PLEG): container finished" podID="27ba5678-96ac-4505-8313-84cab2d57434" containerID="337b44ba67d4d100a74e91e853b309c31b28a5925eea01a43b24d192e0a1790a" exitCode=0 Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.750537 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-f8bsb" event={"ID":"27ba5678-96ac-4505-8313-84cab2d57434","Type":"ContainerDied","Data":"337b44ba67d4d100a74e91e853b309c31b28a5925eea01a43b24d192e0a1790a"} Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.764349 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-d27d-account-create-update-vqv6l" podStartSLOduration=3.7643317229999997 podStartE2EDuration="3.764331723s" podCreationTimestamp="2026-01-28 16:58:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:35.759462943 +0000 UTC m=+1419.317789831" watchObservedRunningTime="2026-01-28 16:58:35.764331723 +0000 UTC m=+1419.322658611" Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.792611 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"03f31376-451f-4360-bea2-4d4e557568f0","Type":"ContainerStarted","Data":"c4e0575fe16f962f94c49a0b6a1237ffac79cfdd64ec367a2d0b0485fcfe450f"} Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.792665 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"03f31376-451f-4360-bea2-4d4e557568f0","Type":"ContainerStarted","Data":"d9977d204ad5b5b056a3f30d9462179929646a66c60c17fa1b1ee4543560ebf8"} Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.798619 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-qg5jt" event={"ID":"f33aa46e-bc47-4499-b263-7a0152340bc4","Type":"ContainerStarted","Data":"6a3391eb336cb96ec928f057e9c3b5501fe31809d533255e31189ca9860b0a49"} Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.830908 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-qg5jt" podStartSLOduration=4.830882786 podStartE2EDuration="4.830882786s" podCreationTimestamp="2026-01-28 16:58:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:35.824242988 +0000 UTC m=+1419.382569876" watchObservedRunningTime="2026-01-28 16:58:35.830882786 +0000 UTC m=+1419.389209664" Jan 28 16:58:35 crc kubenswrapper[4877]: E0128 16:58:35.862139 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27ba5678_96ac_4505_8313_84cab2d57434.slice/crio-337b44ba67d4d100a74e91e853b309c31b28a5925eea01a43b24d192e0a1790a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27ba5678_96ac_4505_8313_84cab2d57434.slice/crio-conmon-337b44ba67d4d100a74e91e853b309c31b28a5925eea01a43b24d192e0a1790a.scope\": RecentStats: unable to find data in memory cache]" Jan 28 16:58:35 crc kubenswrapper[4877]: I0128 16:58:35.927225 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-lptvh"] Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.012758 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-g4wc7"] Jan 28 16:58:36 crc kubenswrapper[4877]: W0128 16:58:36.099162 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod424aed47_f19b_40f2_b8c1_dfc24b8d605d.slice/crio-38a518a91142158688eeea5dfddd4cd8b9792e1693ef05f9705bcf7e7d085cb0 WatchSource:0}: Error finding container 38a518a91142158688eeea5dfddd4cd8b9792e1693ef05f9705bcf7e7d085cb0: Status 404 returned error can't find the container with id 38a518a91142158688eeea5dfddd4cd8b9792e1693ef05f9705bcf7e7d085cb0 Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.828033 4877 generic.go:334] "Generic (PLEG): container finished" podID="8be5d3d7-b0d0-47ba-906c-efe5b871f035" containerID="5d36b2a06fe50bbb399287c88c54c7a07f7826a44098f1b106dee87fb3fa968f" exitCode=0 Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.828243 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-526f-account-create-update-ngljr" event={"ID":"8be5d3d7-b0d0-47ba-906c-efe5b871f035","Type":"ContainerDied","Data":"5d36b2a06fe50bbb399287c88c54c7a07f7826a44098f1b106dee87fb3fa968f"} Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.834099 4877 generic.go:334] "Generic (PLEG): container finished" podID="6aa96069-1d33-441b-85e7-64c2a06b3860" containerID="fbd4fe8af9b9adc070ca706800ff14bed5a29c3c5d41b226d95526ec439ed16d" exitCode=0 Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.834181 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-e6a7-account-create-update-f2jl2" event={"ID":"6aa96069-1d33-441b-85e7-64c2a06b3860","Type":"ContainerDied","Data":"fbd4fe8af9b9adc070ca706800ff14bed5a29c3c5d41b226d95526ec439ed16d"} Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.839810 4877 generic.go:334] "Generic (PLEG): container finished" podID="b658b847-5c12-40f3-8602-a0897a18066f" containerID="ddce209fc785587cb17240da78fe293bc3162cc21168d9e0a6ffcba87c4db0cb" exitCode=0 Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.839959 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-v5fwc" event={"ID":"b658b847-5c12-40f3-8602-a0897a18066f","Type":"ContainerDied","Data":"ddce209fc785587cb17240da78fe293bc3162cc21168d9e0a6ffcba87c4db0cb"} Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.845077 4877 generic.go:334] "Generic (PLEG): container finished" podID="d6c0346e-3510-4fa2-ac67-46bed9447c6b" containerID="3b09f7d0955c178d2f7cc818b6c262d984c7364ec0cdc0e57f632b977dd1613c" exitCode=0 Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.845136 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d6a6-account-create-update-zc2m5" 
event={"ID":"d6c0346e-3510-4fa2-ac67-46bed9447c6b","Type":"ContainerDied","Data":"3b09f7d0955c178d2f7cc818b6c262d984c7364ec0cdc0e57f632b977dd1613c"} Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.855695 4877 generic.go:334] "Generic (PLEG): container finished" podID="f33aa46e-bc47-4499-b263-7a0152340bc4" containerID="6a3391eb336cb96ec928f057e9c3b5501fe31809d533255e31189ca9860b0a49" exitCode=0 Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.855928 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-qg5jt" event={"ID":"f33aa46e-bc47-4499-b263-7a0152340bc4","Type":"ContainerDied","Data":"6a3391eb336cb96ec928f057e9c3b5501fe31809d533255e31189ca9860b0a49"} Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.867004 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-g4wc7" event={"ID":"424aed47-f19b-40f2-b8c1-dfc24b8d605d","Type":"ContainerStarted","Data":"38a518a91142158688eeea5dfddd4cd8b9792e1693ef05f9705bcf7e7d085cb0"} Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.870181 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lptvh" event={"ID":"534e973d-d29f-4aac-8922-5f42d27c0770","Type":"ContainerStarted","Data":"6b9c06660e5730d7b9e6e3cee148ca7da69c2214fb29f2bfd6e619aa75b182e0"} Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.886640 4877 generic.go:334] "Generic (PLEG): container finished" podID="b73dca88-cd81-4389-86cf-dd7973a4e489" containerID="3e8d6e228c3d7e3bc8b5bae2522e027386953e5c58e9387d7140f5ee18f9962b" exitCode=0 Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.886729 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d27d-account-create-update-vqv6l" event={"ID":"b73dca88-cd81-4389-86cf-dd7973a4e489","Type":"ContainerDied","Data":"3e8d6e228c3d7e3bc8b5bae2522e027386953e5c58e9387d7140f5ee18f9962b"} Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.894753 4877 generic.go:334] "Generic (PLEG): container finished" podID="7f0118a1-6a58-42e3-b8af-bbc0bbd6777d" containerID="c3138c25344035be31cb31f1f3aea8098572e5eb4087600f89d10726f58a58d7" exitCode=0 Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.895059 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-mxsg9" event={"ID":"7f0118a1-6a58-42e3-b8af-bbc0bbd6777d","Type":"ContainerDied","Data":"c3138c25344035be31cb31f1f3aea8098572e5eb4087600f89d10726f58a58d7"} Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.909629 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"03f31376-451f-4360-bea2-4d4e557568f0","Type":"ContainerStarted","Data":"8378807923aea03057f043558319b635c44062edca4351262aadaba061736180"} Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.909674 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"03f31376-451f-4360-bea2-4d4e557568f0","Type":"ContainerStarted","Data":"d49816fe5954fc3054be9d41fd989bd7e3674db8762f3f3f4b9f57e9b5daeea0"} Jan 28 16:58:36 crc kubenswrapper[4877]: I0128 16:58:36.997899 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=53.658749386 podStartE2EDuration="1m10.997877117s" podCreationTimestamp="2026-01-28 16:57:26 +0000 UTC" firstStartedPulling="2026-01-28 16:58:13.4578765 +0000 UTC m=+1397.016203388" lastFinishedPulling="2026-01-28 16:58:30.797004231 +0000 UTC m=+1414.355331119" 
observedRunningTime="2026-01-28 16:58:36.983297237 +0000 UTC m=+1420.541624135" watchObservedRunningTime="2026-01-28 16:58:36.997877117 +0000 UTC m=+1420.556204005" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.427169 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-f8bsb" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.432793 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-fblln"] Jan 28 16:58:37 crc kubenswrapper[4877]: E0128 16:58:37.433432 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27ba5678-96ac-4505-8313-84cab2d57434" containerName="mariadb-database-create" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.433459 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="27ba5678-96ac-4505-8313-84cab2d57434" containerName="mariadb-database-create" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.433823 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="27ba5678-96ac-4505-8313-84cab2d57434" containerName="mariadb-database-create" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.435773 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-fblln"] Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.435912 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.439876 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.467542 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wq78p\" (UniqueName: \"kubernetes.io/projected/27ba5678-96ac-4505-8313-84cab2d57434-kube-api-access-wq78p\") pod \"27ba5678-96ac-4505-8313-84cab2d57434\" (UID: \"27ba5678-96ac-4505-8313-84cab2d57434\") " Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.467687 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27ba5678-96ac-4505-8313-84cab2d57434-operator-scripts\") pod \"27ba5678-96ac-4505-8313-84cab2d57434\" (UID: \"27ba5678-96ac-4505-8313-84cab2d57434\") " Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.468442 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-config\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.468534 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.468586 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " 
pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.468616 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.468664 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nvjr\" (UniqueName: \"kubernetes.io/projected/16116e77-e71d-41b1-a821-2c01fbbd71ae-kube-api-access-7nvjr\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.468728 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.469226 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27ba5678-96ac-4505-8313-84cab2d57434-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "27ba5678-96ac-4505-8313-84cab2d57434" (UID: "27ba5678-96ac-4505-8313-84cab2d57434"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.548637 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-4rgth"] Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.550784 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-4rgth" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.553087 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.570487 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/26904586-2e5c-44de-b91c-0a6d288d6d9e-operator-scripts\") pod \"root-account-create-update-4rgth\" (UID: \"26904586-2e5c-44de-b91c-0a6d288d6d9e\") " pod="openstack/root-account-create-update-4rgth" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.570654 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-config\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.570719 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2sx8r\" (UniqueName: \"kubernetes.io/projected/26904586-2e5c-44de-b91c-0a6d288d6d9e-kube-api-access-2sx8r\") pod \"root-account-create-update-4rgth\" (UID: \"26904586-2e5c-44de-b91c-0a6d288d6d9e\") " pod="openstack/root-account-create-update-4rgth" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.570829 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.570867 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.570926 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.571144 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nvjr\" (UniqueName: \"kubernetes.io/projected/16116e77-e71d-41b1-a821-2c01fbbd71ae-kube-api-access-7nvjr\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.571233 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.571744 4877 reconciler_common.go:293] "Volume detached for 
volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27ba5678-96ac-4505-8313-84cab2d57434-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.573468 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.573563 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.574885 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-config\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.575178 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.575747 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.585431 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-4rgth"] Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.622012 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nvjr\" (UniqueName: \"kubernetes.io/projected/16116e77-e71d-41b1-a821-2c01fbbd71ae-kube-api-access-7nvjr\") pod \"dnsmasq-dns-77585f5f8c-fblln\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") " pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.675985 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/26904586-2e5c-44de-b91c-0a6d288d6d9e-operator-scripts\") pod \"root-account-create-update-4rgth\" (UID: \"26904586-2e5c-44de-b91c-0a6d288d6d9e\") " pod="openstack/root-account-create-update-4rgth" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.676399 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2sx8r\" (UniqueName: \"kubernetes.io/projected/26904586-2e5c-44de-b91c-0a6d288d6d9e-kube-api-access-2sx8r\") pod \"root-account-create-update-4rgth\" (UID: \"26904586-2e5c-44de-b91c-0a6d288d6d9e\") " pod="openstack/root-account-create-update-4rgth" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.678026 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/26904586-2e5c-44de-b91c-0a6d288d6d9e-operator-scripts\") pod \"root-account-create-update-4rgth\" (UID: \"26904586-2e5c-44de-b91c-0a6d288d6d9e\") " pod="openstack/root-account-create-update-4rgth" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.718566 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2sx8r\" (UniqueName: \"kubernetes.io/projected/26904586-2e5c-44de-b91c-0a6d288d6d9e-kube-api-access-2sx8r\") pod \"root-account-create-update-4rgth\" (UID: \"26904586-2e5c-44de-b91c-0a6d288d6d9e\") " pod="openstack/root-account-create-update-4rgth" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.766324 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.873797 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-4rgth" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.930201 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-f8bsb" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.930984 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-f8bsb" event={"ID":"27ba5678-96ac-4505-8313-84cab2d57434","Type":"ContainerDied","Data":"9e85d1959f3267dd6f9fe820ea17410b8ff240bd838ddad304a32ff33cc32366"} Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.931026 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e85d1959f3267dd6f9fe820ea17410b8ff240bd838ddad304a32ff33cc32366" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.939346 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27ba5678-96ac-4505-8313-84cab2d57434-kube-api-access-wq78p" (OuterVolumeSpecName: "kube-api-access-wq78p") pod "27ba5678-96ac-4505-8313-84cab2d57434" (UID: "27ba5678-96ac-4505-8313-84cab2d57434"). InnerVolumeSpecName "kube-api-access-wq78p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:58:37 crc kubenswrapper[4877]: I0128 16:58:37.984790 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wq78p\" (UniqueName: \"kubernetes.io/projected/27ba5678-96ac-4505-8313-84cab2d57434-kube-api-access-wq78p\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.187137 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.198842 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.713791 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d27d-account-create-update-vqv6l" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.745742 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-v5fwc" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.807432 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glwjb\" (UniqueName: \"kubernetes.io/projected/b658b847-5c12-40f3-8602-a0897a18066f-kube-api-access-glwjb\") pod \"b658b847-5c12-40f3-8602-a0897a18066f\" (UID: \"b658b847-5c12-40f3-8602-a0897a18066f\") " Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.807626 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b73dca88-cd81-4389-86cf-dd7973a4e489-operator-scripts\") pod \"b73dca88-cd81-4389-86cf-dd7973a4e489\" (UID: \"b73dca88-cd81-4389-86cf-dd7973a4e489\") " Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.807670 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p6vjs\" (UniqueName: \"kubernetes.io/projected/b73dca88-cd81-4389-86cf-dd7973a4e489-kube-api-access-p6vjs\") pod \"b73dca88-cd81-4389-86cf-dd7973a4e489\" (UID: \"b73dca88-cd81-4389-86cf-dd7973a4e489\") " Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.807838 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b658b847-5c12-40f3-8602-a0897a18066f-operator-scripts\") pod \"b658b847-5c12-40f3-8602-a0897a18066f\" (UID: \"b658b847-5c12-40f3-8602-a0897a18066f\") " Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.814829 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b658b847-5c12-40f3-8602-a0897a18066f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b658b847-5c12-40f3-8602-a0897a18066f" (UID: "b658b847-5c12-40f3-8602-a0897a18066f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.815022 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b73dca88-cd81-4389-86cf-dd7973a4e489-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b73dca88-cd81-4389-86cf-dd7973a4e489" (UID: "b73dca88-cd81-4389-86cf-dd7973a4e489"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.846387 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b73dca88-cd81-4389-86cf-dd7973a4e489-kube-api-access-p6vjs" (OuterVolumeSpecName: "kube-api-access-p6vjs") pod "b73dca88-cd81-4389-86cf-dd7973a4e489" (UID: "b73dca88-cd81-4389-86cf-dd7973a4e489"). InnerVolumeSpecName "kube-api-access-p6vjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.849675 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b658b847-5c12-40f3-8602-a0897a18066f-kube-api-access-glwjb" (OuterVolumeSpecName: "kube-api-access-glwjb") pod "b658b847-5c12-40f3-8602-a0897a18066f" (UID: "b658b847-5c12-40f3-8602-a0897a18066f"). InnerVolumeSpecName "kube-api-access-glwjb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.913996 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b73dca88-cd81-4389-86cf-dd7973a4e489-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.914057 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p6vjs\" (UniqueName: \"kubernetes.io/projected/b73dca88-cd81-4389-86cf-dd7973a4e489-kube-api-access-p6vjs\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.914074 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b658b847-5c12-40f3-8602-a0897a18066f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.914086 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glwjb\" (UniqueName: \"kubernetes.io/projected/b658b847-5c12-40f3-8602-a0897a18066f-kube-api-access-glwjb\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.977066 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d27d-account-create-update-vqv6l" event={"ID":"b73dca88-cd81-4389-86cf-dd7973a4e489","Type":"ContainerDied","Data":"3935cc187958c29f8d4b5dd3f7a759514ff1157a7f0293362b8fccc5e22d2d0c"} Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.977109 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3935cc187958c29f8d4b5dd3f7a759514ff1157a7f0293362b8fccc5e22d2d0c" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.977173 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d27d-account-create-update-vqv6l" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.988382 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-v5fwc" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.988615 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-v5fwc" event={"ID":"b658b847-5c12-40f3-8602-a0897a18066f","Type":"ContainerDied","Data":"aca1dfee07a0e9fb19628ea9fe04bc7dee739bacd447811d24a67c16e6330455"} Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.988687 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aca1dfee07a0e9fb19628ea9fe04bc7dee739bacd447811d24a67c16e6330455" Jan 28 16:58:38 crc kubenswrapper[4877]: I0128 16:58:38.989326 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.004657 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-mxsg9" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.080099 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-qg5jt" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.080673 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-526f-account-create-update-ngljr" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.094806 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-d6a6-account-create-update-zc2m5" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.104897 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-e6a7-account-create-update-f2jl2" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.120355 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f0118a1-6a58-42e3-b8af-bbc0bbd6777d-operator-scripts\") pod \"7f0118a1-6a58-42e3-b8af-bbc0bbd6777d\" (UID: \"7f0118a1-6a58-42e3-b8af-bbc0bbd6777d\") " Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.120497 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvf9w\" (UniqueName: \"kubernetes.io/projected/7f0118a1-6a58-42e3-b8af-bbc0bbd6777d-kube-api-access-zvf9w\") pod \"7f0118a1-6a58-42e3-b8af-bbc0bbd6777d\" (UID: \"7f0118a1-6a58-42e3-b8af-bbc0bbd6777d\") " Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.120892 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f0118a1-6a58-42e3-b8af-bbc0bbd6777d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7f0118a1-6a58-42e3-b8af-bbc0bbd6777d" (UID: "7f0118a1-6a58-42e3-b8af-bbc0bbd6777d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.124838 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f0118a1-6a58-42e3-b8af-bbc0bbd6777d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.127816 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f0118a1-6a58-42e3-b8af-bbc0bbd6777d-kube-api-access-zvf9w" (OuterVolumeSpecName: "kube-api-access-zvf9w") pod "7f0118a1-6a58-42e3-b8af-bbc0bbd6777d" (UID: "7f0118a1-6a58-42e3-b8af-bbc0bbd6777d"). InnerVolumeSpecName "kube-api-access-zvf9w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.185429 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-fblln"] Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.229572 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w8ff\" (UniqueName: \"kubernetes.io/projected/8be5d3d7-b0d0-47ba-906c-efe5b871f035-kube-api-access-2w8ff\") pod \"8be5d3d7-b0d0-47ba-906c-efe5b871f035\" (UID: \"8be5d3d7-b0d0-47ba-906c-efe5b871f035\") " Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.230015 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lcsv8\" (UniqueName: \"kubernetes.io/projected/f33aa46e-bc47-4499-b263-7a0152340bc4-kube-api-access-lcsv8\") pod \"f33aa46e-bc47-4499-b263-7a0152340bc4\" (UID: \"f33aa46e-bc47-4499-b263-7a0152340bc4\") " Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.230053 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8be5d3d7-b0d0-47ba-906c-efe5b871f035-operator-scripts\") pod \"8be5d3d7-b0d0-47ba-906c-efe5b871f035\" (UID: \"8be5d3d7-b0d0-47ba-906c-efe5b871f035\") " Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.230080 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhlmq\" (UniqueName: \"kubernetes.io/projected/d6c0346e-3510-4fa2-ac67-46bed9447c6b-kube-api-access-nhlmq\") pod \"d6c0346e-3510-4fa2-ac67-46bed9447c6b\" (UID: \"d6c0346e-3510-4fa2-ac67-46bed9447c6b\") " Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.230245 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d6c0346e-3510-4fa2-ac67-46bed9447c6b-operator-scripts\") pod \"d6c0346e-3510-4fa2-ac67-46bed9447c6b\" (UID: \"d6c0346e-3510-4fa2-ac67-46bed9447c6b\") " Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.230303 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qjwgd\" (UniqueName: \"kubernetes.io/projected/6aa96069-1d33-441b-85e7-64c2a06b3860-kube-api-access-qjwgd\") pod \"6aa96069-1d33-441b-85e7-64c2a06b3860\" (UID: \"6aa96069-1d33-441b-85e7-64c2a06b3860\") " Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.230498 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6aa96069-1d33-441b-85e7-64c2a06b3860-operator-scripts\") pod \"6aa96069-1d33-441b-85e7-64c2a06b3860\" (UID: \"6aa96069-1d33-441b-85e7-64c2a06b3860\") " Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.230545 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f33aa46e-bc47-4499-b263-7a0152340bc4-operator-scripts\") pod \"f33aa46e-bc47-4499-b263-7a0152340bc4\" (UID: \"f33aa46e-bc47-4499-b263-7a0152340bc4\") " Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.231559 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvf9w\" (UniqueName: \"kubernetes.io/projected/7f0118a1-6a58-42e3-b8af-bbc0bbd6777d-kube-api-access-zvf9w\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.233850 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/8be5d3d7-b0d0-47ba-906c-efe5b871f035-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8be5d3d7-b0d0-47ba-906c-efe5b871f035" (UID: "8be5d3d7-b0d0-47ba-906c-efe5b871f035"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.234290 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f33aa46e-bc47-4499-b263-7a0152340bc4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f33aa46e-bc47-4499-b263-7a0152340bc4" (UID: "f33aa46e-bc47-4499-b263-7a0152340bc4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.234928 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d6c0346e-3510-4fa2-ac67-46bed9447c6b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d6c0346e-3510-4fa2-ac67-46bed9447c6b" (UID: "d6c0346e-3510-4fa2-ac67-46bed9447c6b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.237025 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6c0346e-3510-4fa2-ac67-46bed9447c6b-kube-api-access-nhlmq" (OuterVolumeSpecName: "kube-api-access-nhlmq") pod "d6c0346e-3510-4fa2-ac67-46bed9447c6b" (UID: "d6c0346e-3510-4fa2-ac67-46bed9447c6b"). InnerVolumeSpecName "kube-api-access-nhlmq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.239063 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6aa96069-1d33-441b-85e7-64c2a06b3860-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6aa96069-1d33-441b-85e7-64c2a06b3860" (UID: "6aa96069-1d33-441b-85e7-64c2a06b3860"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.240228 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f33aa46e-bc47-4499-b263-7a0152340bc4-kube-api-access-lcsv8" (OuterVolumeSpecName: "kube-api-access-lcsv8") pod "f33aa46e-bc47-4499-b263-7a0152340bc4" (UID: "f33aa46e-bc47-4499-b263-7a0152340bc4"). InnerVolumeSpecName "kube-api-access-lcsv8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.248808 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6aa96069-1d33-441b-85e7-64c2a06b3860-kube-api-access-qjwgd" (OuterVolumeSpecName: "kube-api-access-qjwgd") pod "6aa96069-1d33-441b-85e7-64c2a06b3860" (UID: "6aa96069-1d33-441b-85e7-64c2a06b3860"). InnerVolumeSpecName "kube-api-access-qjwgd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.258282 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8be5d3d7-b0d0-47ba-906c-efe5b871f035-kube-api-access-2w8ff" (OuterVolumeSpecName: "kube-api-access-2w8ff") pod "8be5d3d7-b0d0-47ba-906c-efe5b871f035" (UID: "8be5d3d7-b0d0-47ba-906c-efe5b871f035"). InnerVolumeSpecName "kube-api-access-2w8ff". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.332843 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6aa96069-1d33-441b-85e7-64c2a06b3860-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.332875 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f33aa46e-bc47-4499-b263-7a0152340bc4-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.332885 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w8ff\" (UniqueName: \"kubernetes.io/projected/8be5d3d7-b0d0-47ba-906c-efe5b871f035-kube-api-access-2w8ff\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.332898 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lcsv8\" (UniqueName: \"kubernetes.io/projected/f33aa46e-bc47-4499-b263-7a0152340bc4-kube-api-access-lcsv8\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.332907 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8be5d3d7-b0d0-47ba-906c-efe5b871f035-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.332915 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhlmq\" (UniqueName: \"kubernetes.io/projected/d6c0346e-3510-4fa2-ac67-46bed9447c6b-kube-api-access-nhlmq\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.332924 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d6c0346e-3510-4fa2-ac67-46bed9447c6b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.332932 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qjwgd\" (UniqueName: \"kubernetes.io/projected/6aa96069-1d33-441b-85e7-64c2a06b3860-kube-api-access-qjwgd\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:39 crc kubenswrapper[4877]: I0128 16:58:39.434337 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-4rgth"] Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.003680 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-qg5jt" event={"ID":"f33aa46e-bc47-4499-b263-7a0152340bc4","Type":"ContainerDied","Data":"3f179b9a76e54e3166dae3268ac7b8e9152d7a51ae2d84d849a5458da9e97539"} Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.004111 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f179b9a76e54e3166dae3268ac7b8e9152d7a51ae2d84d849a5458da9e97539" Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.003709 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-qg5jt" Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.008768 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-526f-account-create-update-ngljr" event={"ID":"8be5d3d7-b0d0-47ba-906c-efe5b871f035","Type":"ContainerDied","Data":"d86e45521b97f6dba47ad291e7d55de0cdce9016a03778f70bb153cbaf614bc1"} Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.008812 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d86e45521b97f6dba47ad291e7d55de0cdce9016a03778f70bb153cbaf614bc1" Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.008875 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-526f-account-create-update-ngljr" Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.012576 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-mxsg9" event={"ID":"7f0118a1-6a58-42e3-b8af-bbc0bbd6777d","Type":"ContainerDied","Data":"8d2bde655a093fda4b2515fca9a5169f87df0b2eb1ae7c0fbd7457d8343b51c1"} Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.012620 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d2bde655a093fda4b2515fca9a5169f87df0b2eb1ae7c0fbd7457d8343b51c1" Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.012625 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-mxsg9" Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.017406 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-e6a7-account-create-update-f2jl2" event={"ID":"6aa96069-1d33-441b-85e7-64c2a06b3860","Type":"ContainerDied","Data":"ee1a2f69ffa9e1ca24d169168c5800932dec9608e3576a57e683d676b883098a"} Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.017449 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee1a2f69ffa9e1ca24d169168c5800932dec9608e3576a57e683d676b883098a" Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.017449 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-e6a7-account-create-update-f2jl2" Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.022084 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d6a6-account-create-update-zc2m5" event={"ID":"d6c0346e-3510-4fa2-ac67-46bed9447c6b","Type":"ContainerDied","Data":"31aa7cf69fd88c90067ca17fa6d0cc0c01972d8eb8cf3c8b395c859ddc2597b8"} Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.022095 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-d6a6-account-create-update-zc2m5" Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.023611 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="31aa7cf69fd88c90067ca17fa6d0cc0c01972d8eb8cf3c8b395c859ddc2597b8" Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.026556 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-4rgth" event={"ID":"26904586-2e5c-44de-b91c-0a6d288d6d9e","Type":"ContainerStarted","Data":"adbcd74da6cd3bd303e7b6b4214a66233e04c4a5fcf9e5dae03270a1ddef7a09"} Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.026608 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-4rgth" event={"ID":"26904586-2e5c-44de-b91c-0a6d288d6d9e","Type":"ContainerStarted","Data":"5a87a1a1430cefbabc6a4bd388ee9ff988e7d96266529bfea61d8c81e4067eab"} Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.029324 4877 generic.go:334] "Generic (PLEG): container finished" podID="16116e77-e71d-41b1-a821-2c01fbbd71ae" containerID="dd7306c5a3a689e3e39fb89c4d4f8d80a2f16754ce2f42c672b602df1152187e" exitCode=0 Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.029358 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" event={"ID":"16116e77-e71d-41b1-a821-2c01fbbd71ae","Type":"ContainerDied","Data":"dd7306c5a3a689e3e39fb89c4d4f8d80a2f16754ce2f42c672b602df1152187e"} Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.029396 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" event={"ID":"16116e77-e71d-41b1-a821-2c01fbbd71ae","Type":"ContainerStarted","Data":"ace95f8d129077569fc1e1f5f30930f6eabd368da59746a16942631e0c549254"} Jan 28 16:58:40 crc kubenswrapper[4877]: I0128 16:58:40.103075 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-4rgth" podStartSLOduration=3.103053272 podStartE2EDuration="3.103053272s" podCreationTimestamp="2026-01-28 16:58:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:40.085080191 +0000 UTC m=+1423.643407079" watchObservedRunningTime="2026-01-28 16:58:40.103053272 +0000 UTC m=+1423.661380160" Jan 28 16:58:41 crc kubenswrapper[4877]: I0128 16:58:41.059914 4877 generic.go:334] "Generic (PLEG): container finished" podID="26904586-2e5c-44de-b91c-0a6d288d6d9e" containerID="adbcd74da6cd3bd303e7b6b4214a66233e04c4a5fcf9e5dae03270a1ddef7a09" exitCode=0 Jan 28 16:58:41 crc kubenswrapper[4877]: I0128 16:58:41.060607 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-4rgth" event={"ID":"26904586-2e5c-44de-b91c-0a6d288d6d9e","Type":"ContainerDied","Data":"adbcd74da6cd3bd303e7b6b4214a66233e04c4a5fcf9e5dae03270a1ddef7a09"} Jan 28 16:58:41 crc kubenswrapper[4877]: I0128 16:58:41.075262 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" event={"ID":"16116e77-e71d-41b1-a821-2c01fbbd71ae","Type":"ContainerStarted","Data":"f65447dc29a5eafcdb4ea8557741642a3edb658c1c29f148fa00a226c8b97756"} Jan 28 16:58:41 crc kubenswrapper[4877]: I0128 16:58:41.075610 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 16:58:41 crc kubenswrapper[4877]: I0128 16:58:41.117797 4877 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" podStartSLOduration=4.117768634 podStartE2EDuration="4.117768634s" podCreationTimestamp="2026-01-28 16:58:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:58:41.116328766 +0000 UTC m=+1424.674655654" watchObservedRunningTime="2026-01-28 16:58:41.117768634 +0000 UTC m=+1424.676095522" Jan 28 16:58:42 crc kubenswrapper[4877]: I0128 16:58:42.037880 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 16:58:42 crc kubenswrapper[4877]: I0128 16:58:42.046363 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerName="prometheus" containerID="cri-o://f8d8b88d5022a7165f7254dc48c17aeaf1910fdac484ecbe5086133c39685ab5" gracePeriod=600 Jan 28 16:58:42 crc kubenswrapper[4877]: I0128 16:58:42.046665 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerName="config-reloader" containerID="cri-o://f75eb898b0085ad22f7310c9505b7c2e8609e44f67bcc39d588a63f3a2c9038b" gracePeriod=600 Jan 28 16:58:42 crc kubenswrapper[4877]: I0128 16:58:42.046598 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerName="thanos-sidecar" containerID="cri-o://3eda1fa15eaacb7af31c17d834c2201f36f138e9d474bb328aa5ca3da46a43e3" gracePeriod=600 Jan 28 16:58:43 crc kubenswrapper[4877]: I0128 16:58:43.107221 4877 generic.go:334] "Generic (PLEG): container finished" podID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerID="3eda1fa15eaacb7af31c17d834c2201f36f138e9d474bb328aa5ca3da46a43e3" exitCode=0 Jan 28 16:58:43 crc kubenswrapper[4877]: I0128 16:58:43.107785 4877 generic.go:334] "Generic (PLEG): container finished" podID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerID="f75eb898b0085ad22f7310c9505b7c2e8609e44f67bcc39d588a63f3a2c9038b" exitCode=0 Jan 28 16:58:43 crc kubenswrapper[4877]: I0128 16:58:43.107796 4877 generic.go:334] "Generic (PLEG): container finished" podID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerID="f8d8b88d5022a7165f7254dc48c17aeaf1910fdac484ecbe5086133c39685ab5" exitCode=0 Jan 28 16:58:43 crc kubenswrapper[4877]: I0128 16:58:43.107260 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"36256bdd-5ada-4651-8944-ed4c8978ed2c","Type":"ContainerDied","Data":"3eda1fa15eaacb7af31c17d834c2201f36f138e9d474bb328aa5ca3da46a43e3"} Jan 28 16:58:43 crc kubenswrapper[4877]: I0128 16:58:43.107830 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"36256bdd-5ada-4651-8944-ed4c8978ed2c","Type":"ContainerDied","Data":"f75eb898b0085ad22f7310c9505b7c2e8609e44f67bcc39d588a63f3a2c9038b"} Jan 28 16:58:43 crc kubenswrapper[4877]: I0128 16:58:43.107841 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"36256bdd-5ada-4651-8944-ed4c8978ed2c","Type":"ContainerDied","Data":"f8d8b88d5022a7165f7254dc48c17aeaf1910fdac484ecbe5086133c39685ab5"} Jan 28 16:58:43 crc kubenswrapper[4877]: I0128 16:58:43.185862 4877 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.0.137:9090/-/ready\": dial tcp 10.217.0.137:9090: connect: connection refused" Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.011782 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-4rgth" Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.119588 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2sx8r\" (UniqueName: \"kubernetes.io/projected/26904586-2e5c-44de-b91c-0a6d288d6d9e-kube-api-access-2sx8r\") pod \"26904586-2e5c-44de-b91c-0a6d288d6d9e\" (UID: \"26904586-2e5c-44de-b91c-0a6d288d6d9e\") " Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.119698 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/26904586-2e5c-44de-b91c-0a6d288d6d9e-operator-scripts\") pod \"26904586-2e5c-44de-b91c-0a6d288d6d9e\" (UID: \"26904586-2e5c-44de-b91c-0a6d288d6d9e\") " Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.121395 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26904586-2e5c-44de-b91c-0a6d288d6d9e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "26904586-2e5c-44de-b91c-0a6d288d6d9e" (UID: "26904586-2e5c-44de-b91c-0a6d288d6d9e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.129687 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26904586-2e5c-44de-b91c-0a6d288d6d9e-kube-api-access-2sx8r" (OuterVolumeSpecName: "kube-api-access-2sx8r") pod "26904586-2e5c-44de-b91c-0a6d288d6d9e" (UID: "26904586-2e5c-44de-b91c-0a6d288d6d9e"). InnerVolumeSpecName "kube-api-access-2sx8r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.133392 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-4rgth" event={"ID":"26904586-2e5c-44de-b91c-0a6d288d6d9e","Type":"ContainerDied","Data":"5a87a1a1430cefbabc6a4bd388ee9ff988e7d96266529bfea61d8c81e4067eab"} Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.133431 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a87a1a1430cefbabc6a4bd388ee9ff988e7d96266529bfea61d8c81e4067eab" Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.134015 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-4rgth" Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.222969 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2sx8r\" (UniqueName: \"kubernetes.io/projected/26904586-2e5c-44de-b91c-0a6d288d6d9e-kube-api-access-2sx8r\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.222997 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/26904586-2e5c-44de-b91c-0a6d288d6d9e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.454321 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.542285 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-config\") pod \"36256bdd-5ada-4651-8944-ed4c8978ed2c\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") "
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.542417 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-web-config\") pod \"36256bdd-5ada-4651-8944-ed4c8978ed2c\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") "
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.542513 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-2\") pod \"36256bdd-5ada-4651-8944-ed4c8978ed2c\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") "
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.542617 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-thanos-prometheus-http-client-file\") pod \"36256bdd-5ada-4651-8944-ed4c8978ed2c\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") "
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.542991 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/36256bdd-5ada-4651-8944-ed4c8978ed2c-tls-assets\") pod \"36256bdd-5ada-4651-8944-ed4c8978ed2c\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") "
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.543133 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/36256bdd-5ada-4651-8944-ed4c8978ed2c-config-out\") pod \"36256bdd-5ada-4651-8944-ed4c8978ed2c\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") "
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.543245 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-0\") pod \"36256bdd-5ada-4651-8944-ed4c8978ed2c\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") "
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.543382 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9g9ck\" (UniqueName: \"kubernetes.io/projected/36256bdd-5ada-4651-8944-ed4c8978ed2c-kube-api-access-9g9ck\") pod \"36256bdd-5ada-4651-8944-ed4c8978ed2c\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") "
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.543638 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\") pod \"36256bdd-5ada-4651-8944-ed4c8978ed2c\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") "
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.543772 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-1\") pod \"36256bdd-5ada-4651-8944-ed4c8978ed2c\" (UID: \"36256bdd-5ada-4651-8944-ed4c8978ed2c\") "
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.545858 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "36256bdd-5ada-4651-8944-ed4c8978ed2c" (UID: "36256bdd-5ada-4651-8944-ed4c8978ed2c"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.547958 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "36256bdd-5ada-4651-8944-ed4c8978ed2c" (UID: "36256bdd-5ada-4651-8944-ed4c8978ed2c"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.548403 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-config" (OuterVolumeSpecName: "config") pod "36256bdd-5ada-4651-8944-ed4c8978ed2c" (UID: "36256bdd-5ada-4651-8944-ed4c8978ed2c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.548446 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "36256bdd-5ada-4651-8944-ed4c8978ed2c" (UID: "36256bdd-5ada-4651-8944-ed4c8978ed2c"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.552789 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36256bdd-5ada-4651-8944-ed4c8978ed2c-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "36256bdd-5ada-4651-8944-ed4c8978ed2c" (UID: "36256bdd-5ada-4651-8944-ed4c8978ed2c"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.554634 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36256bdd-5ada-4651-8944-ed4c8978ed2c-config-out" (OuterVolumeSpecName: "config-out") pod "36256bdd-5ada-4651-8944-ed4c8978ed2c" (UID: "36256bdd-5ada-4651-8944-ed4c8978ed2c"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.558107 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36256bdd-5ada-4651-8944-ed4c8978ed2c-kube-api-access-9g9ck" (OuterVolumeSpecName: "kube-api-access-9g9ck") pod "36256bdd-5ada-4651-8944-ed4c8978ed2c" (UID: "36256bdd-5ada-4651-8944-ed4c8978ed2c"). InnerVolumeSpecName "kube-api-access-9g9ck". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.563994 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "36256bdd-5ada-4651-8944-ed4c8978ed2c" (UID: "36256bdd-5ada-4651-8944-ed4c8978ed2c"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.597505 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "36256bdd-5ada-4651-8944-ed4c8978ed2c" (UID: "36256bdd-5ada-4651-8944-ed4c8978ed2c"). InnerVolumeSpecName "pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.605674 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-web-config" (OuterVolumeSpecName: "web-config") pod "36256bdd-5ada-4651-8944-ed4c8978ed2c" (UID: "36256bdd-5ada-4651-8944-ed4c8978ed2c"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.650419 4877 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.650461 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-config\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.650486 4877 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-web-config\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.650497 4877 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.650509 4877 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/36256bdd-5ada-4651-8944-ed4c8978ed2c-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.650518 4877 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/36256bdd-5ada-4651-8944-ed4c8978ed2c-tls-assets\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.650526 4877 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/36256bdd-5ada-4651-8944-ed4c8978ed2c-config-out\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.650534 4877 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/36256bdd-5ada-4651-8944-ed4c8978ed2c-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.650543 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9g9ck\" (UniqueName: \"kubernetes.io/projected/36256bdd-5ada-4651-8944-ed4c8978ed2c-kube-api-access-9g9ck\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.650582 4877 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\") on node \"crc\" "
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.677450 4877 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.678408 4877 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9") on node "crc"
Jan 28 16:58:45 crc kubenswrapper[4877]: I0128 16:58:45.753435 4877 reconciler_common.go:293] "Volume detached for volume \"pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\") on node \"crc\" DevicePath \"\""
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.147865 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.147871 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"36256bdd-5ada-4651-8944-ed4c8978ed2c","Type":"ContainerDied","Data":"4b2ecfd565282078feb900f4608502714c9c7ea947b3bf94111217c0b19823e1"}
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.150002 4877 scope.go:117] "RemoveContainer" containerID="3eda1fa15eaacb7af31c17d834c2201f36f138e9d474bb328aa5ca3da46a43e3"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.155409 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-g4wc7" event={"ID":"424aed47-f19b-40f2-b8c1-dfc24b8d605d","Type":"ContainerStarted","Data":"b20aadef947101060d0f434ca40a14d9aa659021699079e8eacf772a9e90e62b"}
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.193861 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-g4wc7" podStartSLOduration=2.896397584 podStartE2EDuration="12.1938442s" podCreationTimestamp="2026-01-28 16:58:34 +0000 UTC" firstStartedPulling="2026-01-28 16:58:36.123224164 +0000 UTC m=+1419.681551052" lastFinishedPulling="2026-01-28 16:58:45.42067078 +0000 UTC m=+1428.978997668" observedRunningTime="2026-01-28 16:58:46.184372874 +0000 UTC m=+1429.742699752" watchObservedRunningTime="2026-01-28 16:58:46.1938442 +0000 UTC m=+1429.752171088"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.194644 4877 scope.go:117] "RemoveContainer" containerID="f75eb898b0085ad22f7310c9505b7c2e8609e44f67bcc39d588a63f3a2c9038b"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.220215 4877 scope.go:117] "RemoveContainer" containerID="f8d8b88d5022a7165f7254dc48c17aeaf1910fdac484ecbe5086133c39685ab5"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.229398 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.260960 4877 scope.go:117] "RemoveContainer" containerID="667938be49db0d327863df1e58495f8cc87ed7b4d96182a54a06a44874fcd16b"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.267924 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.298782 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 16:58:46 crc kubenswrapper[4877]: E0128 16:58:46.299800 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerName="thanos-sidecar"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.299822 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerName="thanos-sidecar"
Jan 28 16:58:46 crc kubenswrapper[4877]: E0128 16:58:46.299866 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6c0346e-3510-4fa2-ac67-46bed9447c6b" containerName="mariadb-account-create-update"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.299872 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6c0346e-3510-4fa2-ac67-46bed9447c6b" containerName="mariadb-account-create-update"
Jan 28 16:58:46 crc kubenswrapper[4877]: E0128 16:58:46.299888 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerName="config-reloader"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.299896 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerName="config-reloader"
Jan 28 16:58:46 crc kubenswrapper[4877]: E0128 16:58:46.299915 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f0118a1-6a58-42e3-b8af-bbc0bbd6777d" containerName="mariadb-database-create"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.299921 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f0118a1-6a58-42e3-b8af-bbc0bbd6777d" containerName="mariadb-database-create"
Jan 28 16:58:46 crc kubenswrapper[4877]: E0128 16:58:46.299929 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b658b847-5c12-40f3-8602-a0897a18066f" containerName="mariadb-database-create"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.299935 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="b658b847-5c12-40f3-8602-a0897a18066f" containerName="mariadb-database-create"
Jan 28 16:58:46 crc kubenswrapper[4877]: E0128 16:58:46.299952 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26904586-2e5c-44de-b91c-0a6d288d6d9e" containerName="mariadb-account-create-update"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.299958 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="26904586-2e5c-44de-b91c-0a6d288d6d9e" containerName="mariadb-account-create-update"
Jan 28 16:58:46 crc kubenswrapper[4877]: E0128 16:58:46.299970 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8be5d3d7-b0d0-47ba-906c-efe5b871f035" containerName="mariadb-account-create-update"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.299975 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="8be5d3d7-b0d0-47ba-906c-efe5b871f035" containerName="mariadb-account-create-update"
Jan 28 16:58:46 crc kubenswrapper[4877]: E0128 16:58:46.299989 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b73dca88-cd81-4389-86cf-dd7973a4e489" containerName="mariadb-account-create-update"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.299995 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="b73dca88-cd81-4389-86cf-dd7973a4e489" containerName="mariadb-account-create-update"
Jan 28 16:58:46 crc kubenswrapper[4877]: E0128 16:58:46.300014 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f33aa46e-bc47-4499-b263-7a0152340bc4" containerName="mariadb-database-create"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.300022 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f33aa46e-bc47-4499-b263-7a0152340bc4" containerName="mariadb-database-create"
Jan 28 16:58:46 crc kubenswrapper[4877]: E0128 16:58:46.300034 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerName="init-config-reloader"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.300040 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerName="init-config-reloader"
Jan 28 16:58:46 crc kubenswrapper[4877]: E0128 16:58:46.300052 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6aa96069-1d33-441b-85e7-64c2a06b3860" containerName="mariadb-account-create-update"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.300059 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="6aa96069-1d33-441b-85e7-64c2a06b3860" containerName="mariadb-account-create-update"
Jan 28 16:58:46 crc kubenswrapper[4877]: E0128 16:58:46.300069 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerName="prometheus"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.300075 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerName="prometheus"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.300265 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="26904586-2e5c-44de-b91c-0a6d288d6d9e" containerName="mariadb-account-create-update"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.300276 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerName="thanos-sidecar"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.300289 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="6aa96069-1d33-441b-85e7-64c2a06b3860" containerName="mariadb-account-create-update"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.300303 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="b73dca88-cd81-4389-86cf-dd7973a4e489" containerName="mariadb-account-create-update"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.300316 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerName="prometheus"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.300326 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="8be5d3d7-b0d0-47ba-906c-efe5b871f035" containerName="mariadb-account-create-update"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.300336 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6c0346e-3510-4fa2-ac67-46bed9447c6b" containerName="mariadb-account-create-update"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.300347 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" containerName="config-reloader"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.300358 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="b658b847-5c12-40f3-8602-a0897a18066f" containerName="mariadb-database-create"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.300368 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f0118a1-6a58-42e3-b8af-bbc0bbd6777d" containerName="mariadb-database-create"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.300378 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f33aa46e-bc47-4499-b263-7a0152340bc4" containerName="mariadb-database-create"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.302710 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.306936 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.307542 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.307806 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.308179 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.308402 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.309058 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-k2v98"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.314259 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.314835 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.315448 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.317362 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.477987 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-config\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.478061 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.478126 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.478168 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.478204 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.478318 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/06cc206a-5856-43ec-a04b-b2d51224314d-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.484681 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/06cc206a-5856-43ec-a04b-b2d51224314d-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.484796 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.484904 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmgl9\" (UniqueName: \"kubernetes.io/projected/06cc206a-5856-43ec-a04b-b2d51224314d-kube-api-access-zmgl9\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.484968 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/06cc206a-5856-43ec-a04b-b2d51224314d-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.485027 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.485101 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/06cc206a-5856-43ec-a04b-b2d51224314d-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.485347 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/06cc206a-5856-43ec-a04b-b2d51224314d-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.588445 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.588587 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/06cc206a-5856-43ec-a04b-b2d51224314d-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.588640 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/06cc206a-5856-43ec-a04b-b2d51224314d-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.588664 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.588704 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmgl9\" (UniqueName: \"kubernetes.io/projected/06cc206a-5856-43ec-a04b-b2d51224314d-kube-api-access-zmgl9\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.588734 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/06cc206a-5856-43ec-a04b-b2d51224314d-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.588762 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.588790 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/06cc206a-5856-43ec-a04b-b2d51224314d-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.588831 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/06cc206a-5856-43ec-a04b-b2d51224314d-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.588869 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-config\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.588895 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.589072 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.589102 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.606646 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/06cc206a-5856-43ec-a04b-b2d51224314d-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.606746 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/06cc206a-5856-43ec-a04b-b2d51224314d-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.612211 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.613127 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/06cc206a-5856-43ec-a04b-b2d51224314d-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.615642 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.616822 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.617309 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-config\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.618784 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/06cc206a-5856-43ec-a04b-b2d51224314d-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.619566 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.619603 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e56a4798f43fc8e09f4b73faaf38f0237452e71872991f2da2d2463cbf63cff6/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.619745 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/06cc206a-5856-43ec-a04b-b2d51224314d-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.624458 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.640501 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/06cc206a-5856-43ec-a04b-b2d51224314d-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.641504 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmgl9\" (UniqueName: \"kubernetes.io/projected/06cc206a-5856-43ec-a04b-b2d51224314d-kube-api-access-zmgl9\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.939462 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b2ae7ee2-28f4-463c-b4e3-51de473187a9\") pod \"prometheus-metric-storage-0\" (UID: \"06cc206a-5856-43ec-a04b-b2d51224314d\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:46 crc kubenswrapper[4877]: I0128 16:58:46.964779 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 28 16:58:47 crc kubenswrapper[4877]: I0128 16:58:47.362730 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36256bdd-5ada-4651-8944-ed4c8978ed2c" path="/var/lib/kubelet/pods/36256bdd-5ada-4651-8944-ed4c8978ed2c/volumes"
Jan 28 16:58:47 crc kubenswrapper[4877]: I0128 16:58:47.450001 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 16:58:47 crc kubenswrapper[4877]: W0128 16:58:47.716385 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06cc206a_5856_43ec_a04b_b2d51224314d.slice/crio-72dd1b30b8ee76ea5334565ec6730bc59dcd9fa0a4673f84f06051504b1979ea WatchSource:0}: Error finding container 72dd1b30b8ee76ea5334565ec6730bc59dcd9fa0a4673f84f06051504b1979ea: Status 404 returned error can't find the container with id 72dd1b30b8ee76ea5334565ec6730bc59dcd9fa0a4673f84f06051504b1979ea
Jan 28 16:58:47 crc kubenswrapper[4877]: I0128 16:58:47.769648 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77585f5f8c-fblln"
Jan 28 16:58:47 crc kubenswrapper[4877]: I0128 16:58:47.851615 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2nqdn"]
Jan 28 16:58:47 crc kubenswrapper[4877]: I0128 16:58:47.851903 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-2nqdn" podUID="88801c74-cfbd-4eee-936b-2899b69196aa" containerName="dnsmasq-dns" containerID="cri-o://dc5d6c2b8dbc826f1d1cae192b2f9f09346b7c6d555dec08b31640ac0a78e616" gracePeriod=10
Jan 28 16:58:48 crc kubenswrapper[4877]: I0128 16:58:48.217943 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"06cc206a-5856-43ec-a04b-b2d51224314d","Type":"ContainerStarted","Data":"72dd1b30b8ee76ea5334565ec6730bc59dcd9fa0a4673f84f06051504b1979ea"}
Jan 28 16:58:49 crc kubenswrapper[4877]: I0128 16:58:49.230646 4877 generic.go:334] "Generic (PLEG): container finished" podID="88801c74-cfbd-4eee-936b-2899b69196aa" containerID="dc5d6c2b8dbc826f1d1cae192b2f9f09346b7c6d555dec08b31640ac0a78e616" exitCode=0
Jan 28 16:58:49 crc kubenswrapper[4877]: I0128 16:58:49.230697 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2nqdn" event={"ID":"88801c74-cfbd-4eee-936b-2899b69196aa","Type":"ContainerDied","Data":"dc5d6c2b8dbc826f1d1cae192b2f9f09346b7c6d555dec08b31640ac0a78e616"}
Jan 28 16:58:53 crc kubenswrapper[4877]: I0128 16:58:53.278733 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"06cc206a-5856-43ec-a04b-b2d51224314d","Type":"ContainerStarted","Data":"ef98b94744813d31f37fbc063f7e4b6773d275a8b34800676dbec159fa549aba"}
Jan 28 16:58:57 crc kubenswrapper[4877]: I0128 16:58:57.190333 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-2nqdn" podUID="88801c74-cfbd-4eee-936b-2899b69196aa" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.146:5353: i/o timeout"
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.126969 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-2nqdn"
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.178827 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-dns-svc\") pod \"88801c74-cfbd-4eee-936b-2899b69196aa\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") "
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.178982 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mrq48\" (UniqueName: \"kubernetes.io/projected/88801c74-cfbd-4eee-936b-2899b69196aa-kube-api-access-mrq48\") pod \"88801c74-cfbd-4eee-936b-2899b69196aa\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") "
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.179041 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-ovsdbserver-nb\") pod \"88801c74-cfbd-4eee-936b-2899b69196aa\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") "
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.179137 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-ovsdbserver-sb\") pod \"88801c74-cfbd-4eee-936b-2899b69196aa\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") "
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.179225 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-config\") pod \"88801c74-cfbd-4eee-936b-2899b69196aa\" (UID: \"88801c74-cfbd-4eee-936b-2899b69196aa\") "
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.185671 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88801c74-cfbd-4eee-936b-2899b69196aa-kube-api-access-mrq48" (OuterVolumeSpecName: "kube-api-access-mrq48") pod "88801c74-cfbd-4eee-936b-2899b69196aa" (UID: "88801c74-cfbd-4eee-936b-2899b69196aa"). InnerVolumeSpecName "kube-api-access-mrq48". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.229663 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "88801c74-cfbd-4eee-936b-2899b69196aa" (UID: "88801c74-cfbd-4eee-936b-2899b69196aa"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.258773 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-config" (OuterVolumeSpecName: "config") pod "88801c74-cfbd-4eee-936b-2899b69196aa" (UID: "88801c74-cfbd-4eee-936b-2899b69196aa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.265418 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "88801c74-cfbd-4eee-936b-2899b69196aa" (UID: "88801c74-cfbd-4eee-936b-2899b69196aa"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.277910 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "88801c74-cfbd-4eee-936b-2899b69196aa" (UID: "88801c74-cfbd-4eee-936b-2899b69196aa"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.281763 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-config\") on node \"crc\" DevicePath \"\""
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.281800 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.281810 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mrq48\" (UniqueName: \"kubernetes.io/projected/88801c74-cfbd-4eee-936b-2899b69196aa-kube-api-access-mrq48\") on node \"crc\" DevicePath \"\""
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.281824 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.281835 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/88801c74-cfbd-4eee-936b-2899b69196aa-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.363396 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2nqdn" event={"ID":"88801c74-cfbd-4eee-936b-2899b69196aa","Type":"ContainerDied","Data":"61a929767be35d841e2256976c5b376fd6c95210a37ea8d0beee52968e41668f"}
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.363465 4877 scope.go:117] "RemoveContainer" containerID="dc5d6c2b8dbc826f1d1cae192b2f9f09346b7c6d555dec08b31640ac0a78e616"
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.363496 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-2nqdn"
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.394256 4877 scope.go:117] "RemoveContainer" containerID="b1c85c55edc6ef969b940e4a77ab042c02a5f3e415e516c9b2407efd48050836"
Jan 28 16:59:00 crc kubenswrapper[4877]: E0128 16:59:00.403988 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified"
Jan 28 16:59:00 crc kubenswrapper[4877]: E0128 16:59:00.404339 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9zvpx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-lptvh_openstack(534e973d-d29f-4aac-8922-5f42d27c0770): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 16:59:00 crc kubenswrapper[4877]: E0128 16:59:00.405619 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-lptvh" podUID="534e973d-d29f-4aac-8922-5f42d27c0770"
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.412578 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2nqdn"]
Jan 28 16:59:00 crc kubenswrapper[4877]: I0128 16:59:00.423275 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2nqdn"]
Jan 28 16:59:01 crc kubenswrapper[4877]: I0128 16:59:01.348873 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88801c74-cfbd-4eee-936b-2899b69196aa" path="/var/lib/kubelet/pods/88801c74-cfbd-4eee-936b-2899b69196aa/volumes"
Jan 28 16:59:01 crc kubenswrapper[4877]: I0128 16:59:01.386402 4877 generic.go:334] "Generic (PLEG): container finished" podID="06cc206a-5856-43ec-a04b-b2d51224314d" containerID="ef98b94744813d31f37fbc063f7e4b6773d275a8b34800676dbec159fa549aba" exitCode=0
Jan 28 16:59:01 crc kubenswrapper[4877]: I0128 16:59:01.386682 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"06cc206a-5856-43ec-a04b-b2d51224314d","Type":"ContainerDied","Data":"ef98b94744813d31f37fbc063f7e4b6773d275a8b34800676dbec159fa549aba"}
Jan 28 16:59:01 crc kubenswrapper[4877]: E0128 16:59:01.388911 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-lptvh" podUID="534e973d-d29f-4aac-8922-5f42d27c0770"
Jan 28 16:59:02 crc kubenswrapper[4877]: I0128 16:59:02.191834 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-2nqdn" podUID="88801c74-cfbd-4eee-936b-2899b69196aa" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.146:5353: i/o timeout"
Jan 28 16:59:03 crc kubenswrapper[4877]: I0128 16:59:03.409940 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"06cc206a-5856-43ec-a04b-b2d51224314d","Type":"ContainerStarted","Data":"9e8ea428cfcb86815284f3c99e2b52fb110a81caf31b08b4ce809bf3dbb3d040"}
Jan 28 16:59:07 crc kubenswrapper[4877]: I0128 16:59:07.450243 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"06cc206a-5856-43ec-a04b-b2d51224314d","Type":"ContainerStarted","Data":"b3696ce372172cbdf9f6cf4d4ca2f7c261a99d4643541095021e28a73598884d"}
Jan 28 16:59:08 crc kubenswrapper[4877]: I0128 16:59:08.462168 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"06cc206a-5856-43ec-a04b-b2d51224314d","Type":"ContainerStarted","Data":"c0dc69fe881c2e6fbfaaa0f71102242339369e7a18390ae9cea0f34263f651ab"}
Jan 28 16:59:08 crc kubenswrapper[4877]: I0128 16:59:08.495828 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=22.495804069 podStartE2EDuration="22.495804069s" podCreationTimestamp="2026-01-28 16:58:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:59:08.488604375 +0000 UTC m=+1452.046931263" watchObservedRunningTime="2026-01-28 16:59:08.495804069 +0000 UTC m=+1452.054130967"
Jan 28 16:59:11 crc kubenswrapper[4877]: I0128 16:59:11.965540 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0"
Jan 28 16:59:15 crc kubenswrapper[4877]: I0128 16:59:15.544737 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lptvh" event={"ID":"534e973d-d29f-4aac-8922-5f42d27c0770","Type":"ContainerStarted","Data":"08f7c5897d2ff879abbf30d741b68b0330db81c5cc4d9849652a8f75af4661dc"}
Jan 28 16:59:15 crc kubenswrapper[4877]: I0128 16:59:15.575731 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-lptvh" podStartSLOduration=3.30142772 podStartE2EDuration="41.575714693s" podCreationTimestamp="2026-01-28 16:58:34 +0000 UTC" firstStartedPulling="2026-01-28 16:58:35.968170473 +0000 UTC m=+1419.526497361" lastFinishedPulling="2026-01-28 16:59:14.242457446 +0000 UTC m=+1457.800784334" observedRunningTime="2026-01-28 16:59:15.573577635 +0000 UTC m=+1459.131904523" watchObservedRunningTime="2026-01-28 16:59:15.575714693 +0000 UTC m=+1459.134041581"
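The pod_startup_latency_tracker entries above each carry four raw timestamps and two derived durations. Reading the fields in the obvious way — podStartE2EDuration as observedRunningTime minus podCreationTimestamp, and podStartSLOduration as that span minus the image-pull window (an assumption consistent with the figures, not something this log states) — reproduces the glance-db-sync-lptvh numbers. A minimal sketch:

    # Worked check of the "Observed pod startup duration" entry directly
    # above (openstack/glance-db-sync-lptvh). All four timestamps fall in
    # the same hour, so seconds-since-midnight arithmetic is enough.
    def sec(h, m, s):
        return h * 3600 + m * 60 + s

    created    = sec(16, 58, 34.0)            # podCreationTimestamp
    first_pull = sec(16, 58, 35.968170473)    # firstStartedPulling
    last_pull  = sec(16, 59, 14.242457446)    # lastFinishedPulling
    running    = sec(16, 59, 15.575714693)    # observedRunningTime

    e2e = running - created                   # 41.575714693 -> podStartE2EDuration
    slo = e2e - (last_pull - first_pull)      # 3.30142772   -> podStartSLOduration
    print(f"E2E={e2e:.9f}s SLO={slo:.9f}s")   # modulo float rounding

The keystone-db-sync-g4wc7 entry further up checks out the same way: 12.1938442s end to end, of which 9.297446616s was spent pulling the image, leaving podStartSLOduration=2.896397584.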
Jan 28 16:59:16 crc kubenswrapper[4877]: I0128 16:59:16.966156 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0"
Jan 28 16:59:16 crc kubenswrapper[4877]: I0128 16:59:16.971823 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0"
Jan 28 16:59:17 crc kubenswrapper[4877]: I0128 16:59:17.569642 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0"
Jan 28 16:59:37 crc kubenswrapper[4877]: I0128 16:59:37.076143 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 16:59:37 crc kubenswrapper[4877]: I0128 16:59:37.076773 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 16:59:47 crc kubenswrapper[4877]: I0128 16:59:47.905315 4877 generic.go:334] "Generic (PLEG): container finished" podID="424aed47-f19b-40f2-b8c1-dfc24b8d605d" containerID="b20aadef947101060d0f434ca40a14d9aa659021699079e8eacf772a9e90e62b" exitCode=0
Jan 28 16:59:47 crc kubenswrapper[4877]: I0128 16:59:47.905639 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-g4wc7" event={"ID":"424aed47-f19b-40f2-b8c1-dfc24b8d605d","Type":"ContainerDied","Data":"b20aadef947101060d0f434ca40a14d9aa659021699079e8eacf772a9e90e62b"}
Jan 28 16:59:49 crc kubenswrapper[4877]: I0128 16:59:49.312211 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-g4wc7"
Jan 28 16:59:49 crc kubenswrapper[4877]: I0128 16:59:49.334693 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/424aed47-f19b-40f2-b8c1-dfc24b8d605d-config-data\") pod \"424aed47-f19b-40f2-b8c1-dfc24b8d605d\" (UID: \"424aed47-f19b-40f2-b8c1-dfc24b8d605d\") "
Jan 28 16:59:49 crc kubenswrapper[4877]: I0128 16:59:49.334842 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cw9m\" (UniqueName: \"kubernetes.io/projected/424aed47-f19b-40f2-b8c1-dfc24b8d605d-kube-api-access-7cw9m\") pod \"424aed47-f19b-40f2-b8c1-dfc24b8d605d\" (UID: \"424aed47-f19b-40f2-b8c1-dfc24b8d605d\") "
Jan 28 16:59:49 crc kubenswrapper[4877]: I0128 16:59:49.335110 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/424aed47-f19b-40f2-b8c1-dfc24b8d605d-combined-ca-bundle\") pod \"424aed47-f19b-40f2-b8c1-dfc24b8d605d\" (UID: \"424aed47-f19b-40f2-b8c1-dfc24b8d605d\") "
Jan 28 16:59:49 crc kubenswrapper[4877]: I0128 16:59:49.347959 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/424aed47-f19b-40f2-b8c1-dfc24b8d605d-kube-api-access-7cw9m" (OuterVolumeSpecName: "kube-api-access-7cw9m") pod "424aed47-f19b-40f2-b8c1-dfc24b8d605d" (UID: "424aed47-f19b-40f2-b8c1-dfc24b8d605d"). InnerVolumeSpecName "kube-api-access-7cw9m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:59:49 crc kubenswrapper[4877]: I0128 16:59:49.398580 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/424aed47-f19b-40f2-b8c1-dfc24b8d605d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "424aed47-f19b-40f2-b8c1-dfc24b8d605d" (UID: "424aed47-f19b-40f2-b8c1-dfc24b8d605d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:59:49 crc kubenswrapper[4877]: I0128 16:59:49.431588 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/424aed47-f19b-40f2-b8c1-dfc24b8d605d-config-data" (OuterVolumeSpecName: "config-data") pod "424aed47-f19b-40f2-b8c1-dfc24b8d605d" (UID: "424aed47-f19b-40f2-b8c1-dfc24b8d605d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 16:59:49 crc kubenswrapper[4877]: I0128 16:59:49.457846 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cw9m\" (UniqueName: \"kubernetes.io/projected/424aed47-f19b-40f2-b8c1-dfc24b8d605d-kube-api-access-7cw9m\") on node \"crc\" DevicePath \"\""
Jan 28 16:59:49 crc kubenswrapper[4877]: I0128 16:59:49.457887 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/424aed47-f19b-40f2-b8c1-dfc24b8d605d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 16:59:49 crc kubenswrapper[4877]: I0128 16:59:49.457897 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/424aed47-f19b-40f2-b8c1-dfc24b8d605d-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 16:59:49 crc kubenswrapper[4877]: I0128 16:59:49.928370 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-g4wc7" event={"ID":"424aed47-f19b-40f2-b8c1-dfc24b8d605d","Type":"ContainerDied","Data":"38a518a91142158688eeea5dfddd4cd8b9792e1693ef05f9705bcf7e7d085cb0"}
Jan 28 16:59:49 crc kubenswrapper[4877]: I0128 16:59:49.928691 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="38a518a91142158688eeea5dfddd4cd8b9792e1693ef05f9705bcf7e7d085cb0"
Jan 28 16:59:49 crc kubenswrapper[4877]: I0128 16:59:49.928450 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-g4wc7"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.267992 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-g4rk2"]
Jan 28 16:59:50 crc kubenswrapper[4877]: E0128 16:59:50.268429 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="424aed47-f19b-40f2-b8c1-dfc24b8d605d" containerName="keystone-db-sync"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.268442 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="424aed47-f19b-40f2-b8c1-dfc24b8d605d" containerName="keystone-db-sync"
Jan 28 16:59:50 crc kubenswrapper[4877]: E0128 16:59:50.295021 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88801c74-cfbd-4eee-936b-2899b69196aa" containerName="dnsmasq-dns"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.295059 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="88801c74-cfbd-4eee-936b-2899b69196aa" containerName="dnsmasq-dns"
Jan 28 16:59:50 crc kubenswrapper[4877]: E0128 16:59:50.295119 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88801c74-cfbd-4eee-936b-2899b69196aa" containerName="init"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.295125 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="88801c74-cfbd-4eee-936b-2899b69196aa" containerName="init"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.304868 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="424aed47-f19b-40f2-b8c1-dfc24b8d605d" containerName="keystone-db-sync"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.304914 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="88801c74-cfbd-4eee-936b-2899b69196aa" containerName="dnsmasq-dns"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.305820 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-9n6vh"]
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.307304 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-g4rk2"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.307694 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-9n6vh"]
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.308212 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-9n6vh"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.310206 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-45ggb"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.310566 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.310730 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.310888 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.311705 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-g4rk2"]
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.319639 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.382252 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-pbjv2"]
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.384320 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-pbjv2"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.390530 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.390812 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-tj2bg"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.422038 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-pbjv2"]
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.486048 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-config-data\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.486227 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-scripts\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.486299 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-config\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.486401 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-config-data\") pod \"heat-db-sync-pbjv2\" (UID: \"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5\") " pod="openstack/heat-db-sync-pbjv2"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.486432 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-credential-keys\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.486467 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8plmh\" (UniqueName: \"kubernetes.io/projected/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-kube-api-access-8plmh\") pod \"heat-db-sync-pbjv2\" (UID: \"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5\") " pod="openstack/heat-db-sync-pbjv2"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.486548 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-dns-svc\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.486652 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-fernet-keys\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.486699 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-ovsdbserver-nb\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.487043 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-combined-ca-bundle\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.487172 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-combined-ca-bundle\") pod \"heat-db-sync-pbjv2\" (UID: \"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5\") " pod="openstack/heat-db-sync-pbjv2"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.487496 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nj8nj\" (UniqueName: \"kubernetes.io/projected/8b598e97-6903-4885-a697-661b4bbe3dc5-kube-api-access-nj8nj\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2"
Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.487633 4877 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwntv\" (UniqueName: \"kubernetes.io/projected/6f72ad63-197a-4049-8cad-6f897806b522-kube-api-access-hwntv\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.487810 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-dns-swift-storage-0\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.487904 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-ovsdbserver-sb\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.571292 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-4wxn6"] Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.573609 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-4wxn6" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.583967 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.584250 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.584869 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-bcjxh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.590316 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-combined-ca-bundle\") pod \"heat-db-sync-pbjv2\" (UID: \"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5\") " pod="openstack/heat-db-sync-pbjv2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.590404 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nj8nj\" (UniqueName: \"kubernetes.io/projected/8b598e97-6903-4885-a697-661b4bbe3dc5-kube-api-access-nj8nj\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.590440 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwntv\" (UniqueName: \"kubernetes.io/projected/6f72ad63-197a-4049-8cad-6f897806b522-kube-api-access-hwntv\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.590503 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-dns-swift-storage-0\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " 
pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.590513 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-957zv"] Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.590535 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-ovsdbserver-sb\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.590600 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-config-data\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.590645 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-scripts\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.590677 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-config\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.590709 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-config-data\") pod \"heat-db-sync-pbjv2\" (UID: \"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5\") " pod="openstack/heat-db-sync-pbjv2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.590725 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-credential-keys\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.590748 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8plmh\" (UniqueName: \"kubernetes.io/projected/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-kube-api-access-8plmh\") pod \"heat-db-sync-pbjv2\" (UID: \"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5\") " pod="openstack/heat-db-sync-pbjv2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.590779 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-dns-svc\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.590827 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-fernet-keys\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " 
pod="openstack/keystone-bootstrap-g4rk2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.590851 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-ovsdbserver-nb\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.590896 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-combined-ca-bundle\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.592417 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.592668 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-config\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.592993 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-ovsdbserver-sb\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.593818 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-dns-swift-storage-0\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.595078 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-ovsdbserver-nb\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.595499 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-dns-svc\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.597330 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-p2r4c" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.598129 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.598735 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.637455 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-4wxn6"] Jan 28 16:59:50 crc 
kubenswrapper[4877]: I0128 16:59:50.637537 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-combined-ca-bundle\") pod \"heat-db-sync-pbjv2\" (UID: \"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5\") " pod="openstack/heat-db-sync-pbjv2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.642456 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-config-data\") pod \"heat-db-sync-pbjv2\" (UID: \"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5\") " pod="openstack/heat-db-sync-pbjv2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.655381 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-combined-ca-bundle\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.683262 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8plmh\" (UniqueName: \"kubernetes.io/projected/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-kube-api-access-8plmh\") pod \"heat-db-sync-pbjv2\" (UID: \"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5\") " pod="openstack/heat-db-sync-pbjv2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.686151 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-credential-keys\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.686863 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwntv\" (UniqueName: \"kubernetes.io/projected/6f72ad63-197a-4049-8cad-6f897806b522-kube-api-access-hwntv\") pod \"dnsmasq-dns-55fff446b9-9n6vh\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") " pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.687234 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-scripts\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.691225 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nj8nj\" (UniqueName: \"kubernetes.io/projected/8b598e97-6903-4885-a697-661b4bbe3dc5-kube-api-access-nj8nj\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.693140 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-config-data\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.695240 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-db-sync-config-data\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.695340 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc6b6b48-855c-412b-af8b-be4c27962c4b-combined-ca-bundle\") pod \"neutron-db-sync-4wxn6\" (UID: \"dc6b6b48-855c-412b-af8b-be4c27962c4b\") " pod="openstack/neutron-db-sync-4wxn6" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.695641 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6becdddb-915e-40e0-ba03-9de124ad56c7-etc-machine-id\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.695905 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vnj2\" (UniqueName: \"kubernetes.io/projected/6becdddb-915e-40e0-ba03-9de124ad56c7-kube-api-access-8vnj2\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.696109 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-scripts\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.696333 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/dc6b6b48-855c-412b-af8b-be4c27962c4b-config\") pod \"neutron-db-sync-4wxn6\" (UID: \"dc6b6b48-855c-412b-af8b-be4c27962c4b\") " pod="openstack/neutron-db-sync-4wxn6" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.696785 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7254g\" (UniqueName: \"kubernetes.io/projected/dc6b6b48-855c-412b-af8b-be4c27962c4b-kube-api-access-7254g\") pod \"neutron-db-sync-4wxn6\" (UID: \"dc6b6b48-855c-412b-af8b-be4c27962c4b\") " pod="openstack/neutron-db-sync-4wxn6" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.701698 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-combined-ca-bundle\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.700433 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-config-data\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.728734 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-pbjv2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.740575 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-fernet-keys\") pod \"keystone-bootstrap-g4rk2\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") " pod="openstack/keystone-bootstrap-g4rk2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.805191 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-957zv"] Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.813396 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-scripts\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.813522 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/dc6b6b48-855c-412b-af8b-be4c27962c4b-config\") pod \"neutron-db-sync-4wxn6\" (UID: \"dc6b6b48-855c-412b-af8b-be4c27962c4b\") " pod="openstack/neutron-db-sync-4wxn6" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.813606 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7254g\" (UniqueName: \"kubernetes.io/projected/dc6b6b48-855c-412b-af8b-be4c27962c4b-kube-api-access-7254g\") pod \"neutron-db-sync-4wxn6\" (UID: \"dc6b6b48-855c-412b-af8b-be4c27962c4b\") " pod="openstack/neutron-db-sync-4wxn6" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.813665 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-combined-ca-bundle\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.813709 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-config-data\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.813738 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-db-sync-config-data\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.813763 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc6b6b48-855c-412b-af8b-be4c27962c4b-combined-ca-bundle\") pod \"neutron-db-sync-4wxn6\" (UID: \"dc6b6b48-855c-412b-af8b-be4c27962c4b\") " pod="openstack/neutron-db-sync-4wxn6" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.813826 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6becdddb-915e-40e0-ba03-9de124ad56c7-etc-machine-id\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " 
pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.813889 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vnj2\" (UniqueName: \"kubernetes.io/projected/6becdddb-915e-40e0-ba03-9de124ad56c7-kube-api-access-8vnj2\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.827057 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6becdddb-915e-40e0-ba03-9de124ad56c7-etc-machine-id\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.843529 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-db-sync-config-data\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.843833 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-combined-ca-bundle\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.881108 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-6hfcz"] Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.882426 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-config-data\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.882727 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc6b6b48-855c-412b-af8b-be4c27962c4b-combined-ca-bundle\") pod \"neutron-db-sync-4wxn6\" (UID: \"dc6b6b48-855c-412b-af8b-be4c27962c4b\") " pod="openstack/neutron-db-sync-4wxn6" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.883049 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vnj2\" (UniqueName: \"kubernetes.io/projected/6becdddb-915e-40e0-ba03-9de124ad56c7-kube-api-access-8vnj2\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.883068 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-scripts\") pod \"cinder-db-sync-957zv\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") " pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.888525 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7254g\" (UniqueName: \"kubernetes.io/projected/dc6b6b48-855c-412b-af8b-be4c27962c4b-kube-api-access-7254g\") pod \"neutron-db-sync-4wxn6\" (UID: \"dc6b6b48-855c-412b-af8b-be4c27962c4b\") " pod="openstack/neutron-db-sync-4wxn6" Jan 28 16:59:50 crc 
kubenswrapper[4877]: I0128 16:59:50.895312 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/dc6b6b48-855c-412b-af8b-be4c27962c4b-config\") pod \"neutron-db-sync-4wxn6\" (UID: \"dc6b6b48-855c-412b-af8b-be4c27962c4b\") " pod="openstack/neutron-db-sync-4wxn6" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.905271 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-6hfcz" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.907726 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.907972 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-l6frx" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.913342 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-4wxn6" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.935117 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-g4rk2" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.972218 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" Jan 28 16:59:50 crc kubenswrapper[4877]: I0128 16:59:50.985390 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-6hfcz"] Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.002425 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-957zv" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.020689 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-9n6vh"] Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.039818 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-ktcdh"] Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.040108 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc3b7f9-8297-46ac-b550-d61d9513187c-combined-ca-bundle\") pod \"barbican-db-sync-6hfcz\" (UID: \"ffc3b7f9-8297-46ac-b550-d61d9513187c\") " pod="openstack/barbican-db-sync-6hfcz" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.044225 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ffc3b7f9-8297-46ac-b550-d61d9513187c-db-sync-config-data\") pod \"barbican-db-sync-6hfcz\" (UID: \"ffc3b7f9-8297-46ac-b550-d61d9513187c\") " pod="openstack/barbican-db-sync-6hfcz" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.044445 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r65vk\" (UniqueName: \"kubernetes.io/projected/ffc3b7f9-8297-46ac-b550-d61d9513187c-kube-api-access-r65vk\") pod \"barbican-db-sync-6hfcz\" (UID: \"ffc3b7f9-8297-46ac-b550-d61d9513187c\") " pod="openstack/barbican-db-sync-6hfcz" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.042788 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.078354 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-ktcdh"] Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.097673 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-kfq2d"] Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.099247 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-kfq2d" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.105073 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.105611 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-s5l59" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.105638 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.125352 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-kfq2d"] Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.146593 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r65vk\" (UniqueName: \"kubernetes.io/projected/ffc3b7f9-8297-46ac-b550-d61d9513187c-kube-api-access-r65vk\") pod \"barbican-db-sync-6hfcz\" (UID: \"ffc3b7f9-8297-46ac-b550-d61d9513187c\") " pod="openstack/barbican-db-sync-6hfcz" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.148400 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-config\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.148623 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-dns-svc\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.148659 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-ovsdbserver-sb\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.148719 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-dns-swift-storage-0\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.148868 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc3b7f9-8297-46ac-b550-d61d9513187c-combined-ca-bundle\") pod \"barbican-db-sync-6hfcz\" (UID: 
\"ffc3b7f9-8297-46ac-b550-d61d9513187c\") " pod="openstack/barbican-db-sync-6hfcz" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.150571 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzxkv\" (UniqueName: \"kubernetes.io/projected/63b3e104-e1f3-462d-98b7-4ab4f679b619-kube-api-access-bzxkv\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.150672 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-ovsdbserver-nb\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.150824 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ffc3b7f9-8297-46ac-b550-d61d9513187c-db-sync-config-data\") pod \"barbican-db-sync-6hfcz\" (UID: \"ffc3b7f9-8297-46ac-b550-d61d9513187c\") " pod="openstack/barbican-db-sync-6hfcz" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.157080 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ffc3b7f9-8297-46ac-b550-d61d9513187c-db-sync-config-data\") pod \"barbican-db-sync-6hfcz\" (UID: \"ffc3b7f9-8297-46ac-b550-d61d9513187c\") " pod="openstack/barbican-db-sync-6hfcz" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.158175 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc3b7f9-8297-46ac-b550-d61d9513187c-combined-ca-bundle\") pod \"barbican-db-sync-6hfcz\" (UID: \"ffc3b7f9-8297-46ac-b550-d61d9513187c\") " pod="openstack/barbican-db-sync-6hfcz" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.172133 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r65vk\" (UniqueName: \"kubernetes.io/projected/ffc3b7f9-8297-46ac-b550-d61d9513187c-kube-api-access-r65vk\") pod \"barbican-db-sync-6hfcz\" (UID: \"ffc3b7f9-8297-46ac-b550-d61d9513187c\") " pod="openstack/barbican-db-sync-6hfcz" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.204080 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.210412 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.215837 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.216096 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.218497 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.260340 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dcc64de1-66f2-48e6-969f-61aa68773678-logs\") pod \"placement-db-sync-kfq2d\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") " pod="openstack/placement-db-sync-kfq2d" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.261075 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-config\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.265922 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-config\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.266187 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-scripts\") pod \"placement-db-sync-kfq2d\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") " pod="openstack/placement-db-sync-kfq2d" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.266219 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-dns-svc\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.266243 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-ovsdbserver-sb\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.266268 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-dns-swift-storage-0\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.267784 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-config-data\") pod \"placement-db-sync-kfq2d\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") " 
pod="openstack/placement-db-sync-kfq2d" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.268314 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbsl9\" (UniqueName: \"kubernetes.io/projected/dcc64de1-66f2-48e6-969f-61aa68773678-kube-api-access-kbsl9\") pod \"placement-db-sync-kfq2d\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") " pod="openstack/placement-db-sync-kfq2d" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.268606 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzxkv\" (UniqueName: \"kubernetes.io/projected/63b3e104-e1f3-462d-98b7-4ab4f679b619-kube-api-access-bzxkv\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.269637 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-dns-svc\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.270278 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-ovsdbserver-sb\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.271668 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-ovsdbserver-nb\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.271793 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-combined-ca-bundle\") pod \"placement-db-sync-kfq2d\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") " pod="openstack/placement-db-sync-kfq2d" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.272802 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-ovsdbserver-nb\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.272943 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-dns-swift-storage-0\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.291040 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzxkv\" (UniqueName: \"kubernetes.io/projected/63b3e104-e1f3-462d-98b7-4ab4f679b619-kube-api-access-bzxkv\") pod \"dnsmasq-dns-76fcf4b695-ktcdh\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") " 
pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.335858 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-6hfcz" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.374014 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-scripts\") pod \"placement-db-sync-kfq2d\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") " pod="openstack/placement-db-sync-kfq2d" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.374073 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de95a2d4-539f-46f1-abcc-fe8e46e404ee-log-httpd\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.374108 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrvjt\" (UniqueName: \"kubernetes.io/projected/de95a2d4-539f-46f1-abcc-fe8e46e404ee-kube-api-access-nrvjt\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.374146 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-config-data\") pod \"placement-db-sync-kfq2d\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") " pod="openstack/placement-db-sync-kfq2d" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.374194 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbsl9\" (UniqueName: \"kubernetes.io/projected/dcc64de1-66f2-48e6-969f-61aa68773678-kube-api-access-kbsl9\") pod \"placement-db-sync-kfq2d\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") " pod="openstack/placement-db-sync-kfq2d" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.374241 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-scripts\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.374304 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-config-data\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.374349 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-combined-ca-bundle\") pod \"placement-db-sync-kfq2d\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") " pod="openstack/placement-db-sync-kfq2d" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.374419 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dcc64de1-66f2-48e6-969f-61aa68773678-logs\") pod \"placement-db-sync-kfq2d\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") " 
pod="openstack/placement-db-sync-kfq2d" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.374445 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.374495 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.374560 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de95a2d4-539f-46f1-abcc-fe8e46e404ee-run-httpd\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.378764 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-config-data\") pod \"placement-db-sync-kfq2d\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") " pod="openstack/placement-db-sync-kfq2d" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.379047 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dcc64de1-66f2-48e6-969f-61aa68773678-logs\") pod \"placement-db-sync-kfq2d\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") " pod="openstack/placement-db-sync-kfq2d" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.379596 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-scripts\") pod \"placement-db-sync-kfq2d\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") " pod="openstack/placement-db-sync-kfq2d" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.380078 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.382966 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-combined-ca-bundle\") pod \"placement-db-sync-kfq2d\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") " pod="openstack/placement-db-sync-kfq2d" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.410570 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbsl9\" (UniqueName: \"kubernetes.io/projected/dcc64de1-66f2-48e6-969f-61aa68773678-kube-api-access-kbsl9\") pod \"placement-db-sync-kfq2d\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") " pod="openstack/placement-db-sync-kfq2d" Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.443527 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-kfq2d"
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.478816 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-scripts\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0"
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.478880 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-config-data\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0"
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.479009 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0"
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.479046 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0"
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.479097 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de95a2d4-539f-46f1-abcc-fe8e46e404ee-run-httpd\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0"
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.479131 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de95a2d4-539f-46f1-abcc-fe8e46e404ee-log-httpd\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0"
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.479159 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrvjt\" (UniqueName: \"kubernetes.io/projected/de95a2d4-539f-46f1-abcc-fe8e46e404ee-kube-api-access-nrvjt\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0"
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.483522 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de95a2d4-539f-46f1-abcc-fe8e46e404ee-run-httpd\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0"
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.483860 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de95a2d4-539f-46f1-abcc-fe8e46e404ee-log-httpd\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0"
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.485192 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-scripts\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0"
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.489827 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-config-data\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0"
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.502138 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0"
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.502317 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0"
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.506091 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrvjt\" (UniqueName: \"kubernetes.io/projected/de95a2d4-539f-46f1-abcc-fe8e46e404ee-kube-api-access-nrvjt\") pod \"ceilometer-0\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " pod="openstack/ceilometer-0"
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.538017 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.692424 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-pbjv2"]
Jan 28 16:59:51 crc kubenswrapper[4877]: I0128 16:59:51.963700 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-pbjv2" event={"ID":"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5","Type":"ContainerStarted","Data":"6963953e61b4e6fecdf05ec777c33a1016c586ddcbb9c42e89f3ae8bb5934532"}
Jan 28 16:59:52 crc kubenswrapper[4877]: I0128 16:59:52.039358 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-4wxn6"]
Jan 28 16:59:52 crc kubenswrapper[4877]: I0128 16:59:52.054302 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-g4rk2"]
Jan 28 16:59:52 crc kubenswrapper[4877]: W0128 16:59:52.098724 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f72ad63_197a_4049_8cad_6f897806b522.slice/crio-27fad57bf59ed033fa0538518c867af983e1def6e1eaeafcdb8ee1f091c40936 WatchSource:0}: Error finding container 27fad57bf59ed033fa0538518c867af983e1def6e1eaeafcdb8ee1f091c40936: Status 404 returned error can't find the container with id 27fad57bf59ed033fa0538518c867af983e1def6e1eaeafcdb8ee1f091c40936
Jan 28 16:59:52 crc kubenswrapper[4877]: I0128 16:59:52.105820 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-9n6vh"]
Jan 28 16:59:52 crc kubenswrapper[4877]: I0128 16:59:52.119398 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-957zv"]
Jan 28 16:59:52 crc kubenswrapper[4877]: I0128 16:59:52.514238 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 16:59:52 crc kubenswrapper[4877]: I0128 16:59:52.589356 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-6hfcz"]
Jan 28 16:59:52 crc kubenswrapper[4877]: I0128 16:59:52.607667 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-kfq2d"]
Jan 28 16:59:52 crc kubenswrapper[4877]: W0128 16:59:52.622843 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddcc64de1_66f2_48e6_969f_61aa68773678.slice/crio-4b3097e26be6d79f6758d39c3b6a3628ee05a0a138f8a74cfdefece65734450e WatchSource:0}: Error finding container 4b3097e26be6d79f6758d39c3b6a3628ee05a0a138f8a74cfdefece65734450e: Status 404 returned error can't find the container with id 4b3097e26be6d79f6758d39c3b6a3628ee05a0a138f8a74cfdefece65734450e
Jan 28 16:59:52 crc kubenswrapper[4877]: I0128 16:59:52.624736 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-ktcdh"]
Jan 28 16:59:52 crc kubenswrapper[4877]: I0128 16:59:52.641584 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.002519 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-kfq2d" event={"ID":"dcc64de1-66f2-48e6-969f-61aa68773678","Type":"ContainerStarted","Data":"4b3097e26be6d79f6758d39c3b6a3628ee05a0a138f8a74cfdefece65734450e"}
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.005811 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" event={"ID":"63b3e104-e1f3-462d-98b7-4ab4f679b619","Type":"ContainerStarted","Data":"10fe46e31ac42442e11a49ed25dec00f1abb1a19605c2ae561e2ca0ccc59dd10"}
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.009301 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-g4rk2" event={"ID":"8b598e97-6903-4885-a697-661b4bbe3dc5","Type":"ContainerStarted","Data":"cbc204a5a57dcadecfb4b7b6c50f46294dcef52bc5d1dc252d23dc039d2fb078"}
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.009339 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-g4rk2" event={"ID":"8b598e97-6903-4885-a697-661b4bbe3dc5","Type":"ContainerStarted","Data":"711fe02befdc9992a316c3424da7403957d29a40a512b56339001e3e6a44e777"}
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.011821 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-4wxn6" event={"ID":"dc6b6b48-855c-412b-af8b-be4c27962c4b","Type":"ContainerStarted","Data":"9d6ab7a208b419836251b13cfebbbb207c58e5cda6e8ceadb24190892c535403"}
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.011872 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-4wxn6" event={"ID":"dc6b6b48-855c-412b-af8b-be4c27962c4b","Type":"ContainerStarted","Data":"7a22b6ef02d7c08dd5eb99ede0d92690c0f15eca6645efa469f7374f3339dcb6"}
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.016096 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-6hfcz" event={"ID":"ffc3b7f9-8297-46ac-b550-d61d9513187c","Type":"ContainerStarted","Data":"6fc9dc009ba949caaa21fd7a2282d34ab7b11e1eab2f481cf599584965113e86"}
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.018315 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de95a2d4-539f-46f1-abcc-fe8e46e404ee","Type":"ContainerStarted","Data":"27a83b011a9986615aba5ed86b934f630cdfecf354d611105917ebb56af6c07e"}
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.036698 4877 generic.go:334] "Generic (PLEG): container finished" podID="6f72ad63-197a-4049-8cad-6f897806b522" containerID="3f5b1178b06ee37e161c41e8a62dd3982a5fc1627cfb74ff9d0650415cec8555" exitCode=0
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.036812 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" event={"ID":"6f72ad63-197a-4049-8cad-6f897806b522","Type":"ContainerDied","Data":"3f5b1178b06ee37e161c41e8a62dd3982a5fc1627cfb74ff9d0650415cec8555"}
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.036839 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" event={"ID":"6f72ad63-197a-4049-8cad-6f897806b522","Type":"ContainerStarted","Data":"27fad57bf59ed033fa0538518c867af983e1def6e1eaeafcdb8ee1f091c40936"}
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.039142 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-957zv" event={"ID":"6becdddb-915e-40e0-ba03-9de124ad56c7","Type":"ContainerStarted","Data":"c213ca938bafe85b2cabd0df83842e2c305a65dc89e093bb8768dedd3b93d6c1"}
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.039265 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-g4rk2" podStartSLOduration=3.039212793 podStartE2EDuration="3.039212793s" podCreationTimestamp="2026-01-28 16:59:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:59:53.026472259 +0000 UTC m=+1496.584799157" watchObservedRunningTime="2026-01-28 16:59:53.039212793 +0000 UTC m=+1496.597539681"
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.065934 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-4wxn6" podStartSLOduration=3.065910192 podStartE2EDuration="3.065910192s" podCreationTimestamp="2026-01-28 16:59:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:59:53.058356588 +0000 UTC m=+1496.616683476" watchObservedRunningTime="2026-01-28 16:59:53.065910192 +0000 UTC m=+1496.624237080"
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.604018 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-9n6vh"
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.752960 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-config\") pod \"6f72ad63-197a-4049-8cad-6f897806b522\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") "
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.753045 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-ovsdbserver-sb\") pod \"6f72ad63-197a-4049-8cad-6f897806b522\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") "
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.753132 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-dns-swift-storage-0\") pod \"6f72ad63-197a-4049-8cad-6f897806b522\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") "
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.753202 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-dns-svc\") pod \"6f72ad63-197a-4049-8cad-6f897806b522\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") "
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.753236 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hwntv\" (UniqueName: \"kubernetes.io/projected/6f72ad63-197a-4049-8cad-6f897806b522-kube-api-access-hwntv\") pod \"6f72ad63-197a-4049-8cad-6f897806b522\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") "
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.753352 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-ovsdbserver-nb\") pod \"6f72ad63-197a-4049-8cad-6f897806b522\" (UID: \"6f72ad63-197a-4049-8cad-6f897806b522\") "
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.778801 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f72ad63-197a-4049-8cad-6f897806b522-kube-api-access-hwntv" (OuterVolumeSpecName: "kube-api-access-hwntv") pod "6f72ad63-197a-4049-8cad-6f897806b522" (UID: "6f72ad63-197a-4049-8cad-6f897806b522"). InnerVolumeSpecName "kube-api-access-hwntv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.810621 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-config" (OuterVolumeSpecName: "config") pod "6f72ad63-197a-4049-8cad-6f897806b522" (UID: "6f72ad63-197a-4049-8cad-6f897806b522"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.813076 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6f72ad63-197a-4049-8cad-6f897806b522" (UID: "6f72ad63-197a-4049-8cad-6f897806b522"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.839931 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6f72ad63-197a-4049-8cad-6f897806b522" (UID: "6f72ad63-197a-4049-8cad-6f897806b522"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.859901 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6f72ad63-197a-4049-8cad-6f897806b522" (UID: "6f72ad63-197a-4049-8cad-6f897806b522"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.861371 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hwntv\" (UniqueName: \"kubernetes.io/projected/6f72ad63-197a-4049-8cad-6f897806b522-kube-api-access-hwntv\") on node \"crc\" DevicePath \"\""
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.861392 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-config\") on node \"crc\" DevicePath \"\""
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.861402 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.861412 4877 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.861420 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.904974 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6f72ad63-197a-4049-8cad-6f897806b522" (UID: "6f72ad63-197a-4049-8cad-6f897806b522"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 16:59:53 crc kubenswrapper[4877]: I0128 16:59:53.963009 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6f72ad63-197a-4049-8cad-6f897806b522-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 28 16:59:54 crc kubenswrapper[4877]: I0128 16:59:54.082077 4877 generic.go:334] "Generic (PLEG): container finished" podID="63b3e104-e1f3-462d-98b7-4ab4f679b619" containerID="ce3612223051e607b69728d9f2ca0d61b8a64eead01bc6e7152d430627570f27" exitCode=0
Jan 28 16:59:54 crc kubenswrapper[4877]: I0128 16:59:54.082153 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" event={"ID":"63b3e104-e1f3-462d-98b7-4ab4f679b619","Type":"ContainerDied","Data":"ce3612223051e607b69728d9f2ca0d61b8a64eead01bc6e7152d430627570f27"}
Jan 28 16:59:54 crc kubenswrapper[4877]: I0128 16:59:54.110601 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-9n6vh"
Jan 28 16:59:54 crc kubenswrapper[4877]: I0128 16:59:54.111194 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55fff446b9-9n6vh" event={"ID":"6f72ad63-197a-4049-8cad-6f897806b522","Type":"ContainerDied","Data":"27fad57bf59ed033fa0538518c867af983e1def6e1eaeafcdb8ee1f091c40936"}
Jan 28 16:59:54 crc kubenswrapper[4877]: I0128 16:59:54.111250 4877 scope.go:117] "RemoveContainer" containerID="3f5b1178b06ee37e161c41e8a62dd3982a5fc1627cfb74ff9d0650415cec8555"
Jan 28 16:59:54 crc kubenswrapper[4877]: I0128 16:59:54.250369 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-9n6vh"]
Jan 28 16:59:54 crc kubenswrapper[4877]: I0128 16:59:54.263285 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-9n6vh"]
Jan 28 16:59:55 crc kubenswrapper[4877]: I0128 16:59:55.129103 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" event={"ID":"63b3e104-e1f3-462d-98b7-4ab4f679b619","Type":"ContainerStarted","Data":"0bbd9f95078ca6f1c48588dcaa6f31a46d7683d350f5f9c90273b0e7e94db9dd"}
Jan 28 16:59:55 crc kubenswrapper[4877]: I0128 16:59:55.130671 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh"
Jan 28 16:59:55 crc kubenswrapper[4877]: I0128 16:59:55.358230 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f72ad63-197a-4049-8cad-6f897806b522" path="/var/lib/kubelet/pods/6f72ad63-197a-4049-8cad-6f897806b522/volumes"
Jan 28 16:59:57 crc kubenswrapper[4877]: I0128 16:59:57.369290 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" podStartSLOduration=7.369270916 podStartE2EDuration="7.369270916s" podCreationTimestamp="2026-01-28 16:59:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 16:59:55.158361588 +0000 UTC m=+1498.716688476" watchObservedRunningTime="2026-01-28 16:59:57.369270916 +0000 UTC m=+1500.927597804"
Jan 28 16:59:59 crc kubenswrapper[4877]: I0128 16:59:59.175275 4877 generic.go:334] "Generic (PLEG): container finished" podID="8b598e97-6903-4885-a697-661b4bbe3dc5" containerID="cbc204a5a57dcadecfb4b7b6c50f46294dcef52bc5d1dc252d23dc039d2fb078" exitCode=0
Jan 28 16:59:59 crc kubenswrapper[4877]: I0128 16:59:59.175357 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-g4rk2" event={"ID":"8b598e97-6903-4885-a697-661b4bbe3dc5","Type":"ContainerDied","Data":"cbc204a5a57dcadecfb4b7b6c50f46294dcef52bc5d1dc252d23dc039d2fb078"}
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.152428 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9"]
Jan 28 17:00:00 crc kubenswrapper[4877]: E0128 17:00:00.153183 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f72ad63-197a-4049-8cad-6f897806b522" containerName="init"
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.153200 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f72ad63-197a-4049-8cad-6f897806b522" containerName="init"
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.153411 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f72ad63-197a-4049-8cad-6f897806b522" containerName="init"
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.154005 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9"]
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.154083 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9"
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.179311 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.179326 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.231383 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54351f81-b326-424d-8061-5108152ce046-secret-volume\") pod \"collect-profiles-29493660-wbkv9\" (UID: \"54351f81-b326-424d-8061-5108152ce046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9"
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.231851 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54351f81-b326-424d-8061-5108152ce046-config-volume\") pod \"collect-profiles-29493660-wbkv9\" (UID: \"54351f81-b326-424d-8061-5108152ce046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9"
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.232245 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnnjv\" (UniqueName: \"kubernetes.io/projected/54351f81-b326-424d-8061-5108152ce046-kube-api-access-lnnjv\") pod \"collect-profiles-29493660-wbkv9\" (UID: \"54351f81-b326-424d-8061-5108152ce046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9"
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.334153 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54351f81-b326-424d-8061-5108152ce046-config-volume\") pod \"collect-profiles-29493660-wbkv9\" (UID: \"54351f81-b326-424d-8061-5108152ce046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9"
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.334302 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnnjv\" (UniqueName: \"kubernetes.io/projected/54351f81-b326-424d-8061-5108152ce046-kube-api-access-lnnjv\") pod \"collect-profiles-29493660-wbkv9\" (UID: \"54351f81-b326-424d-8061-5108152ce046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9"
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.334828 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54351f81-b326-424d-8061-5108152ce046-secret-volume\") pod \"collect-profiles-29493660-wbkv9\" (UID: \"54351f81-b326-424d-8061-5108152ce046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9"
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.334997 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54351f81-b326-424d-8061-5108152ce046-config-volume\") pod \"collect-profiles-29493660-wbkv9\" (UID: \"54351f81-b326-424d-8061-5108152ce046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9"
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.343591 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54351f81-b326-424d-8061-5108152ce046-secret-volume\") pod \"collect-profiles-29493660-wbkv9\" (UID: \"54351f81-b326-424d-8061-5108152ce046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9"
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.351702 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnnjv\" (UniqueName: \"kubernetes.io/projected/54351f81-b326-424d-8061-5108152ce046-kube-api-access-lnnjv\") pod \"collect-profiles-29493660-wbkv9\" (UID: \"54351f81-b326-424d-8061-5108152ce046\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9"
Jan 28 17:00:00 crc kubenswrapper[4877]: I0128 17:00:00.539779 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9"
Jan 28 17:00:01 crc kubenswrapper[4877]: I0128 17:00:01.382574 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh"
Jan 28 17:00:01 crc kubenswrapper[4877]: I0128 17:00:01.454075 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-fblln"]
Jan 28 17:00:01 crc kubenswrapper[4877]: I0128 17:00:01.454375 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" podUID="16116e77-e71d-41b1-a821-2c01fbbd71ae" containerName="dnsmasq-dns" containerID="cri-o://f65447dc29a5eafcdb4ea8557741642a3edb658c1c29f148fa00a226c8b97756" gracePeriod=10
Jan 28 17:00:02 crc kubenswrapper[4877]: I0128 17:00:02.241007 4877 generic.go:334] "Generic (PLEG): container finished" podID="16116e77-e71d-41b1-a821-2c01fbbd71ae" containerID="f65447dc29a5eafcdb4ea8557741642a3edb658c1c29f148fa00a226c8b97756" exitCode=0
Jan 28 17:00:02 crc kubenswrapper[4877]: I0128 17:00:02.241075 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" event={"ID":"16116e77-e71d-41b1-a821-2c01fbbd71ae","Type":"ContainerDied","Data":"f65447dc29a5eafcdb4ea8557741642a3edb658c1c29f148fa00a226c8b97756"}
Jan 28 17:00:02 crc kubenswrapper[4877]: I0128 17:00:02.769194 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" podUID="16116e77-e71d-41b1-a821-2c01fbbd71ae" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.173:5353: connect: connection refused"
Jan 28 17:00:07 crc kubenswrapper[4877]: I0128 17:00:07.076217 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 17:00:07 crc kubenswrapper[4877]: I0128 17:00:07.076912 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 17:00:07 crc kubenswrapper[4877]: I0128 17:00:07.769037 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" podUID="16116e77-e71d-41b1-a821-2c01fbbd71ae" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.173:5353: connect: connection refused"
Jan 28 17:00:08 crc kubenswrapper[4877]: E0128 17:00:08.730057 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified"
Jan 28 17:00:08 crc kubenswrapper[4877]: E0128 17:00:08.730545 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n66ch54fh68dh5bfhfch5c4h5bh6h685h656hf5h5f8h658h699h5dfhcch65h5cfh5d8h56h667h579hbbh684h8fh56fh97hdh569h689hc7h86q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nrvjt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(de95a2d4-539f-46f1-abcc-fe8e46e404ee): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.046056 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-g4rk2"
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.226904 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nj8nj\" (UniqueName: \"kubernetes.io/projected/8b598e97-6903-4885-a697-661b4bbe3dc5-kube-api-access-nj8nj\") pod \"8b598e97-6903-4885-a697-661b4bbe3dc5\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") "
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.226982 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-scripts\") pod \"8b598e97-6903-4885-a697-661b4bbe3dc5\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") "
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.227115 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-fernet-keys\") pod \"8b598e97-6903-4885-a697-661b4bbe3dc5\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") "
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.227227 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-combined-ca-bundle\") pod \"8b598e97-6903-4885-a697-661b4bbe3dc5\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") "
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.227375 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-config-data\") pod \"8b598e97-6903-4885-a697-661b4bbe3dc5\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") "
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.227398 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-credential-keys\") pod \"8b598e97-6903-4885-a697-661b4bbe3dc5\" (UID: \"8b598e97-6903-4885-a697-661b4bbe3dc5\") "
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.234329 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b598e97-6903-4885-a697-661b4bbe3dc5-kube-api-access-nj8nj" (OuterVolumeSpecName: "kube-api-access-nj8nj") pod "8b598e97-6903-4885-a697-661b4bbe3dc5" (UID: "8b598e97-6903-4885-a697-661b4bbe3dc5"). InnerVolumeSpecName "kube-api-access-nj8nj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.234808 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "8b598e97-6903-4885-a697-661b4bbe3dc5" (UID: "8b598e97-6903-4885-a697-661b4bbe3dc5"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.240658 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-scripts" (OuterVolumeSpecName: "scripts") pod "8b598e97-6903-4885-a697-661b4bbe3dc5" (UID: "8b598e97-6903-4885-a697-661b4bbe3dc5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.241146 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "8b598e97-6903-4885-a697-661b4bbe3dc5" (UID: "8b598e97-6903-4885-a697-661b4bbe3dc5"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.265191 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-config-data" (OuterVolumeSpecName: "config-data") pod "8b598e97-6903-4885-a697-661b4bbe3dc5" (UID: "8b598e97-6903-4885-a697-661b4bbe3dc5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.290948 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8b598e97-6903-4885-a697-661b4bbe3dc5" (UID: "8b598e97-6903-4885-a697-661b4bbe3dc5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.330648 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.330688 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.330702 4877 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-credential-keys\") on node \"crc\" DevicePath \"\""
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.330968 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nj8nj\" (UniqueName: \"kubernetes.io/projected/8b598e97-6903-4885-a697-661b4bbe3dc5-kube-api-access-nj8nj\") on node \"crc\" DevicePath \"\""
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.331557 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.331848 4877 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8b598e97-6903-4885-a697-661b4bbe3dc5-fernet-keys\") on node \"crc\" DevicePath \"\""
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.352952 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-g4rk2" event={"ID":"8b598e97-6903-4885-a697-661b4bbe3dc5","Type":"ContainerDied","Data":"711fe02befdc9992a316c3424da7403957d29a40a512b56339001e3e6a44e777"}
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.353000 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="711fe02befdc9992a316c3424da7403957d29a40a512b56339001e3e6a44e777"
Jan 28 17:00:12 crc kubenswrapper[4877]: I0128 17:00:12.353038 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-g4rk2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.133927 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-g4rk2"]
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.146589 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-g4rk2"]
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.242618 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-lsxs2"]
Jan 28 17:00:13 crc kubenswrapper[4877]: E0128 17:00:13.243127 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b598e97-6903-4885-a697-661b4bbe3dc5" containerName="keystone-bootstrap"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.243144 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b598e97-6903-4885-a697-661b4bbe3dc5" containerName="keystone-bootstrap"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.243340 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b598e97-6903-4885-a697-661b4bbe3dc5" containerName="keystone-bootstrap"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.244080 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.246917 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.247119 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.247201 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-45ggb"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.247202 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.247586 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.259751 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-lsxs2"]
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.342830 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b598e97-6903-4885-a697-661b4bbe3dc5" path="/var/lib/kubelet/pods/8b598e97-6903-4885-a697-661b4bbe3dc5/volumes"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.360380 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-combined-ca-bundle\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.360566 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-credential-keys\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.360603 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-fernet-keys\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.360637 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6htxs\" (UniqueName: \"kubernetes.io/projected/a32bcea9-5341-40a1-9715-a43829459366-kube-api-access-6htxs\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.360680 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-scripts\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.360813 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-config-data\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.462270 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-config-data\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.462371 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-combined-ca-bundle\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.462447 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-credential-keys\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.462491 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-fernet-keys\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.462520 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6htxs\" (UniqueName: \"kubernetes.io/projected/a32bcea9-5341-40a1-9715-a43829459366-kube-api-access-6htxs\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.462557 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-scripts\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.469353 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-credential-keys\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.469494 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-config-data\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.469678 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-scripts\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.470754 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-combined-ca-bundle\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.475753 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-fernet-keys\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.479870 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6htxs\" (UniqueName: \"kubernetes.io/projected/a32bcea9-5341-40a1-9715-a43829459366-kube-api-access-6htxs\") pod \"keystone-bootstrap-lsxs2\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:13 crc kubenswrapper[4877]: I0128 17:00:13.579100 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lsxs2"
Jan 28 17:00:14 crc kubenswrapper[4877]: E0128 17:00:14.150292 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified"
Jan 28 17:00:14 crc kubenswrapper[4877]: E0128 17:00:14.150732 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kbsl9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-kfq2d_openstack(dcc64de1-66f2-48e6-969f-61aa68773678): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 17:00:14 crc kubenswrapper[4877]: E0128 17:00:14.152465 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-kfq2d" podUID="dcc64de1-66f2-48e6-969f-61aa68773678"
Jan 28 17:00:14 crc kubenswrapper[4877]: E0128 17:00:14.373996 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-kfq2d" podUID="dcc64de1-66f2-48e6-969f-61aa68773678"
Jan 28 17:00:17 crc kubenswrapper[4877]: I0128 17:00:17.768459 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" podUID="16116e77-e71d-41b1-a821-2c01fbbd71ae" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.173:5353: i/o timeout"
Jan 28 17:00:17 crc kubenswrapper[4877]: I0128 17:00:17.769201 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77585f5f8c-fblln"
Jan 28 17:00:19 crc kubenswrapper[4877]: E0128 17:00:19.464778 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified"
Jan 28 17:00:19 crc kubenswrapper[4877]: E0128 17:00:19.466972 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-r65vk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-6hfcz_openstack(ffc3b7f9-8297-46ac-b550-d61d9513187c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 17:00:19 crc kubenswrapper[4877]: E0128 17:00:19.468211 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-6hfcz" podUID="ffc3b7f9-8297-46ac-b550-d61d9513187c"
Jan 28 17:00:20 crc kubenswrapper[4877]: E0128 17:00:20.458019 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-6hfcz" podUID="ffc3b7f9-8297-46ac-b550-d61d9513187c"
Jan 28 17:00:22 crc kubenswrapper[4877]: I0128 17:00:22.769681 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" podUID="16116e77-e71d-41b1-a821-2c01fbbd71ae" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.173:5353: i/o timeout"
Jan 28 17:00:27 crc kubenswrapper[4877]: I0128 17:00:27.770872 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" podUID="16116e77-e71d-41b1-a821-2c01fbbd71ae" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.173:5353: i/o timeout"
Jan 28 17:00:28 crc kubenswrapper[4877]: E0128 17:00:28.091501 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified"
Jan 28 17:00:28 crc kubenswrapper[4877]: E0128 17:00:28.091666 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8plmh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-pbjv2_openstack(0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 17:00:28 crc kubenswrapper[4877]: E0128 17:00:28.092741 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-pbjv2" podUID="0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5"
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.134218 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-fblln"
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.296641 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nvjr\" (UniqueName: \"kubernetes.io/projected/16116e77-e71d-41b1-a821-2c01fbbd71ae-kube-api-access-7nvjr\") pod \"16116e77-e71d-41b1-a821-2c01fbbd71ae\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") "
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.296738 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-dns-swift-storage-0\") pod \"16116e77-e71d-41b1-a821-2c01fbbd71ae\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") "
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.296770 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-config\") pod \"16116e77-e71d-41b1-a821-2c01fbbd71ae\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") "
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.296945 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-dns-svc\") pod \"16116e77-e71d-41b1-a821-2c01fbbd71ae\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") "
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.296987 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-ovsdbserver-sb\") pod \"16116e77-e71d-41b1-a821-2c01fbbd71ae\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") "
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.297124 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-ovsdbserver-nb\") pod \"16116e77-e71d-41b1-a821-2c01fbbd71ae\" (UID: \"16116e77-e71d-41b1-a821-2c01fbbd71ae\") "
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.320665 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16116e77-e71d-41b1-a821-2c01fbbd71ae-kube-api-access-7nvjr" (OuterVolumeSpecName: "kube-api-access-7nvjr") pod "16116e77-e71d-41b1-a821-2c01fbbd71ae" (UID: "16116e77-e71d-41b1-a821-2c01fbbd71ae"). InnerVolumeSpecName "kube-api-access-7nvjr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.357896 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-config" (OuterVolumeSpecName: "config") pod "16116e77-e71d-41b1-a821-2c01fbbd71ae" (UID: "16116e77-e71d-41b1-a821-2c01fbbd71ae"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.362759 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "16116e77-e71d-41b1-a821-2c01fbbd71ae" (UID: "16116e77-e71d-41b1-a821-2c01fbbd71ae"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.364075 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "16116e77-e71d-41b1-a821-2c01fbbd71ae" (UID: "16116e77-e71d-41b1-a821-2c01fbbd71ae"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.372591 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "16116e77-e71d-41b1-a821-2c01fbbd71ae" (UID: "16116e77-e71d-41b1-a821-2c01fbbd71ae"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.373908 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "16116e77-e71d-41b1-a821-2c01fbbd71ae" (UID: "16116e77-e71d-41b1-a821-2c01fbbd71ae"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.399857 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.399903 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.399915 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.399925 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nvjr\" (UniqueName: \"kubernetes.io/projected/16116e77-e71d-41b1-a821-2c01fbbd71ae-kube-api-access-7nvjr\") on node \"crc\" DevicePath \"\""
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.399936 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-config\") on node \"crc\" DevicePath \"\""
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.399946 4877 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16116e77-e71d-41b1-a821-2c01fbbd71ae-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.546697 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.546865 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" event={"ID":"16116e77-e71d-41b1-a821-2c01fbbd71ae","Type":"ContainerDied","Data":"ace95f8d129077569fc1e1f5f30930f6eabd368da59746a16942631e0c549254"} Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.546949 4877 scope.go:117] "RemoveContainer" containerID="f65447dc29a5eafcdb4ea8557741642a3edb658c1c29f148fa00a226c8b97756" Jan 28 17:00:28 crc kubenswrapper[4877]: E0128 17:00:28.549923 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified\\\"\"" pod="openstack/heat-db-sync-pbjv2" podUID="0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5" Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.587494 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-fblln"] Jan 28 17:00:28 crc kubenswrapper[4877]: I0128 17:00:28.600108 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-fblln"] Jan 28 17:00:29 crc kubenswrapper[4877]: I0128 17:00:29.346506 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16116e77-e71d-41b1-a821-2c01fbbd71ae" path="/var/lib/kubelet/pods/16116e77-e71d-41b1-a821-2c01fbbd71ae/volumes" Jan 28 17:00:32 crc kubenswrapper[4877]: I0128 17:00:32.772784 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-fblln" podUID="16116e77-e71d-41b1-a821-2c01fbbd71ae" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.173:5353: i/o timeout" Jan 28 17:00:37 crc kubenswrapper[4877]: I0128 17:00:37.077003 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:00:37 crc kubenswrapper[4877]: I0128 17:00:37.077353 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:00:37 crc kubenswrapper[4877]: I0128 17:00:37.077405 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 17:00:37 crc kubenswrapper[4877]: I0128 17:00:37.078231 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 17:00:37 crc kubenswrapper[4877]: I0128 17:00:37.078292 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" 
containerID="cri-o://8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" gracePeriod=600 Jan 28 17:00:38 crc kubenswrapper[4877]: I0128 17:00:38.690743 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" exitCode=0 Jan 28 17:00:38 crc kubenswrapper[4877]: I0128 17:00:38.690825 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82"} Jan 28 17:00:40 crc kubenswrapper[4877]: E0128 17:00:40.159345 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Jan 28 17:00:40 crc kubenswrapper[4877]: E0128 17:00:40.163207 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kbsl9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-kfq2d_openstack(dcc64de1-66f2-48e6-969f-61aa68773678): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 17:00:40 crc kubenswrapper[4877]: I0128 17:00:40.159370 4877 scope.go:117] "RemoveContainer" 
containerID="dd7306c5a3a689e3e39fb89c4d4f8d80a2f16754ce2f42c672b602df1152187e" Jan 28 17:00:40 crc kubenswrapper[4877]: E0128 17:00:40.164593 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-kfq2d" podUID="dcc64de1-66f2-48e6-969f-61aa68773678" Jan 28 17:00:40 crc kubenswrapper[4877]: E0128 17:00:40.241133 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:00:40 crc kubenswrapper[4877]: I0128 17:00:40.281126 4877 scope.go:117] "RemoveContainer" containerID="f756cfb15c7c947e0f669ee8051d31638e5edc388c7a044a2e8411c49dfcce24" Jan 28 17:00:40 crc kubenswrapper[4877]: E0128 17:00:40.346897 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Jan 28 17:00:40 crc kubenswrapper[4877]: E0128 17:00:40.347065 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8vnj2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:n
il,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-957zv_openstack(6becdddb-915e-40e0-ba03-9de124ad56c7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 17:00:40 crc kubenswrapper[4877]: E0128 17:00:40.348403 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-957zv" podUID="6becdddb-915e-40e0-ba03-9de124ad56c7" Jan 28 17:00:40 crc kubenswrapper[4877]: I0128 17:00:40.654635 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9"] Jan 28 17:00:40 crc kubenswrapper[4877]: I0128 17:00:40.721510 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:00:40 crc kubenswrapper[4877]: E0128 17:00:40.722033 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:00:40 crc kubenswrapper[4877]: E0128 17:00:40.722748 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-957zv" podUID="6becdddb-915e-40e0-ba03-9de124ad56c7" Jan 28 17:00:40 crc kubenswrapper[4877]: I0128 17:00:40.826169 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-lsxs2"] Jan 28 17:00:41 crc kubenswrapper[4877]: W0128 17:00:41.116274 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod54351f81_b326_424d_8061_5108152ce046.slice/crio-bf0e1b618f1255ea97bac72f1e356e09adca64d48a6ed9e0c0940ccb5ebbef84 WatchSource:0}: Error finding container bf0e1b618f1255ea97bac72f1e356e09adca64d48a6ed9e0c0940ccb5ebbef84: Status 404 returned error can't find the container with id bf0e1b618f1255ea97bac72f1e356e09adca64d48a6ed9e0c0940ccb5ebbef84 Jan 28 17:00:41 crc kubenswrapper[4877]: W0128 17:00:41.119758 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda32bcea9_5341_40a1_9715_a43829459366.slice/crio-462633fe9da6ae76b21473263dbe2a390c9a5b60067eaf35f2405466f18bcce7 WatchSource:0}: Error finding container 462633fe9da6ae76b21473263dbe2a390c9a5b60067eaf35f2405466f18bcce7: Status 404 returned error can't find the container with id 462633fe9da6ae76b21473263dbe2a390c9a5b60067eaf35f2405466f18bcce7 Jan 28 17:00:41 crc kubenswrapper[4877]: E0128 17:00:41.147533 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified" Jan 28 17:00:41 crc kubenswrapper[4877]: E0128 17:00:41.147703 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-notification-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n66ch54fh68dh5bfhfch5c4h5bh6h685h656hf5h5f8h658h699h5dfhcch65h5cfh5d8h56h667h579hbbh684h8fh56fh97hdh569h689hc7h86q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-notification-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nrvjt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/notificationhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(de95a2d4-539f-46f1-abcc-fe8e46e404ee): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 17:00:41 crc kubenswrapper[4877]: I0128 17:00:41.732162 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lsxs2" event={"ID":"a32bcea9-5341-40a1-9715-a43829459366","Type":"ContainerStarted","Data":"cb0f7f42332faecb63ab5cac702d89613ab5ae387157d537a0150334bccae91f"} Jan 28 17:00:41 crc kubenswrapper[4877]: I0128 17:00:41.732630 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lsxs2" event={"ID":"a32bcea9-5341-40a1-9715-a43829459366","Type":"ContainerStarted","Data":"462633fe9da6ae76b21473263dbe2a390c9a5b60067eaf35f2405466f18bcce7"} Jan 28 17:00:41 crc kubenswrapper[4877]: I0128 17:00:41.733774 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-6hfcz" 
event={"ID":"ffc3b7f9-8297-46ac-b550-d61d9513187c","Type":"ContainerStarted","Data":"2a6bcb2c9bee3ea8986746fd04aa1f77232ae6fe9b206c22bb8a12505700f928"} Jan 28 17:00:41 crc kubenswrapper[4877]: I0128 17:00:41.735813 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9" event={"ID":"54351f81-b326-424d-8061-5108152ce046","Type":"ContainerStarted","Data":"3c996a19f468a70994d82ee768412f94d27a7d28bcbfd15dbbef65ec0eee11ed"} Jan 28 17:00:41 crc kubenswrapper[4877]: I0128 17:00:41.735847 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9" event={"ID":"54351f81-b326-424d-8061-5108152ce046","Type":"ContainerStarted","Data":"bf0e1b618f1255ea97bac72f1e356e09adca64d48a6ed9e0c0940ccb5ebbef84"} Jan 28 17:00:41 crc kubenswrapper[4877]: I0128 17:00:41.763224 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-lsxs2" podStartSLOduration=28.763201146 podStartE2EDuration="28.763201146s" podCreationTimestamp="2026-01-28 17:00:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:00:41.756486555 +0000 UTC m=+1545.314813453" watchObservedRunningTime="2026-01-28 17:00:41.763201146 +0000 UTC m=+1545.321528034" Jan 28 17:00:41 crc kubenswrapper[4877]: I0128 17:00:41.776307 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9" podStartSLOduration=41.776291478 podStartE2EDuration="41.776291478s" podCreationTimestamp="2026-01-28 17:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:00:41.776016051 +0000 UTC m=+1545.334342959" watchObservedRunningTime="2026-01-28 17:00:41.776291478 +0000 UTC m=+1545.334618366" Jan 28 17:00:41 crc kubenswrapper[4877]: I0128 17:00:41.797443 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-6hfcz" podStartSLOduration=3.999567102 podStartE2EDuration="51.797422657s" podCreationTimestamp="2026-01-28 16:59:50 +0000 UTC" firstStartedPulling="2026-01-28 16:59:52.616979428 +0000 UTC m=+1496.175306306" lastFinishedPulling="2026-01-28 17:00:40.414834973 +0000 UTC m=+1543.973161861" observedRunningTime="2026-01-28 17:00:41.792201257 +0000 UTC m=+1545.350528145" watchObservedRunningTime="2026-01-28 17:00:41.797422657 +0000 UTC m=+1545.355749555" Jan 28 17:00:42 crc kubenswrapper[4877]: I0128 17:00:42.746389 4877 generic.go:334] "Generic (PLEG): container finished" podID="54351f81-b326-424d-8061-5108152ce046" containerID="3c996a19f468a70994d82ee768412f94d27a7d28bcbfd15dbbef65ec0eee11ed" exitCode=0 Jan 28 17:00:42 crc kubenswrapper[4877]: I0128 17:00:42.746517 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9" event={"ID":"54351f81-b326-424d-8061-5108152ce046","Type":"ContainerDied","Data":"3c996a19f468a70994d82ee768412f94d27a7d28bcbfd15dbbef65ec0eee11ed"} Jan 28 17:00:42 crc kubenswrapper[4877]: I0128 17:00:42.750039 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-pbjv2" event={"ID":"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5","Type":"ContainerStarted","Data":"91c27be8ad2a3cedce2f5a618a9a9bace58c38236fb7bf48451a18fba21e8ee1"} Jan 28 17:00:43 
crc kubenswrapper[4877]: I0128 17:00:43.788428 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-pbjv2" podStartSLOduration=3.081933142 podStartE2EDuration="53.78840148s" podCreationTimestamp="2026-01-28 16:59:50 +0000 UTC" firstStartedPulling="2026-01-28 16:59:51.705453073 +0000 UTC m=+1495.263779961" lastFinishedPulling="2026-01-28 17:00:42.411921411 +0000 UTC m=+1545.970248299" observedRunningTime="2026-01-28 17:00:43.778291948 +0000 UTC m=+1547.336618836" watchObservedRunningTime="2026-01-28 17:00:43.78840148 +0000 UTC m=+1547.346728368" Jan 28 17:00:44 crc kubenswrapper[4877]: I0128 17:00:44.193165 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9" Jan 28 17:00:44 crc kubenswrapper[4877]: I0128 17:00:44.224298 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54351f81-b326-424d-8061-5108152ce046-secret-volume\") pod \"54351f81-b326-424d-8061-5108152ce046\" (UID: \"54351f81-b326-424d-8061-5108152ce046\") " Jan 28 17:00:44 crc kubenswrapper[4877]: I0128 17:00:44.224366 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lnnjv\" (UniqueName: \"kubernetes.io/projected/54351f81-b326-424d-8061-5108152ce046-kube-api-access-lnnjv\") pod \"54351f81-b326-424d-8061-5108152ce046\" (UID: \"54351f81-b326-424d-8061-5108152ce046\") " Jan 28 17:00:44 crc kubenswrapper[4877]: I0128 17:00:44.224506 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54351f81-b326-424d-8061-5108152ce046-config-volume\") pod \"54351f81-b326-424d-8061-5108152ce046\" (UID: \"54351f81-b326-424d-8061-5108152ce046\") " Jan 28 17:00:44 crc kubenswrapper[4877]: I0128 17:00:44.227087 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54351f81-b326-424d-8061-5108152ce046-config-volume" (OuterVolumeSpecName: "config-volume") pod "54351f81-b326-424d-8061-5108152ce046" (UID: "54351f81-b326-424d-8061-5108152ce046"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:00:44 crc kubenswrapper[4877]: I0128 17:00:44.234148 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54351f81-b326-424d-8061-5108152ce046-kube-api-access-lnnjv" (OuterVolumeSpecName: "kube-api-access-lnnjv") pod "54351f81-b326-424d-8061-5108152ce046" (UID: "54351f81-b326-424d-8061-5108152ce046"). InnerVolumeSpecName "kube-api-access-lnnjv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:00:44 crc kubenswrapper[4877]: I0128 17:00:44.238679 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54351f81-b326-424d-8061-5108152ce046-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "54351f81-b326-424d-8061-5108152ce046" (UID: "54351f81-b326-424d-8061-5108152ce046"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:00:44 crc kubenswrapper[4877]: I0128 17:00:44.326624 4877 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54351f81-b326-424d-8061-5108152ce046-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 17:00:44 crc kubenswrapper[4877]: I0128 17:00:44.326657 4877 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54351f81-b326-424d-8061-5108152ce046-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 17:00:44 crc kubenswrapper[4877]: I0128 17:00:44.326667 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lnnjv\" (UniqueName: \"kubernetes.io/projected/54351f81-b326-424d-8061-5108152ce046-kube-api-access-lnnjv\") on node \"crc\" DevicePath \"\"" Jan 28 17:00:44 crc kubenswrapper[4877]: I0128 17:00:44.774498 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9" event={"ID":"54351f81-b326-424d-8061-5108152ce046","Type":"ContainerDied","Data":"bf0e1b618f1255ea97bac72f1e356e09adca64d48a6ed9e0c0940ccb5ebbef84"} Jan 28 17:00:44 crc kubenswrapper[4877]: I0128 17:00:44.774549 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf0e1b618f1255ea97bac72f1e356e09adca64d48a6ed9e0c0940ccb5ebbef84" Jan 28 17:00:44 crc kubenswrapper[4877]: I0128 17:00:44.774796 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9" Jan 28 17:00:48 crc kubenswrapper[4877]: I0128 17:00:48.825820 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de95a2d4-539f-46f1-abcc-fe8e46e404ee","Type":"ContainerStarted","Data":"a1ce63d58c81cf657fad7b110f07c592e07651cccafff64066f7596f77886d33"} Jan 28 17:00:49 crc kubenswrapper[4877]: I0128 17:00:49.841384 4877 generic.go:334] "Generic (PLEG): container finished" podID="534e973d-d29f-4aac-8922-5f42d27c0770" containerID="08f7c5897d2ff879abbf30d741b68b0330db81c5cc4d9849652a8f75af4661dc" exitCode=0 Jan 28 17:00:49 crc kubenswrapper[4877]: I0128 17:00:49.841511 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lptvh" event={"ID":"534e973d-d29f-4aac-8922-5f42d27c0770","Type":"ContainerDied","Data":"08f7c5897d2ff879abbf30d741b68b0330db81c5cc4d9849652a8f75af4661dc"} Jan 28 17:00:50 crc kubenswrapper[4877]: I0128 17:00:50.853607 4877 generic.go:334] "Generic (PLEG): container finished" podID="a32bcea9-5341-40a1-9715-a43829459366" containerID="cb0f7f42332faecb63ab5cac702d89613ab5ae387157d537a0150334bccae91f" exitCode=0 Jan 28 17:00:50 crc kubenswrapper[4877]: I0128 17:00:50.853698 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lsxs2" event={"ID":"a32bcea9-5341-40a1-9715-a43829459366","Type":"ContainerDied","Data":"cb0f7f42332faecb63ab5cac702d89613ab5ae387157d537a0150334bccae91f"} Jan 28 17:00:51 crc kubenswrapper[4877]: I0128 17:00:51.600824 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-lptvh" Jan 28 17:00:51 crc kubenswrapper[4877]: I0128 17:00:51.715367 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-combined-ca-bundle\") pod \"534e973d-d29f-4aac-8922-5f42d27c0770\" (UID: \"534e973d-d29f-4aac-8922-5f42d27c0770\") " Jan 28 17:00:51 crc kubenswrapper[4877]: I0128 17:00:51.715555 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zvpx\" (UniqueName: \"kubernetes.io/projected/534e973d-d29f-4aac-8922-5f42d27c0770-kube-api-access-9zvpx\") pod \"534e973d-d29f-4aac-8922-5f42d27c0770\" (UID: \"534e973d-d29f-4aac-8922-5f42d27c0770\") " Jan 28 17:00:51 crc kubenswrapper[4877]: I0128 17:00:51.715716 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-config-data\") pod \"534e973d-d29f-4aac-8922-5f42d27c0770\" (UID: \"534e973d-d29f-4aac-8922-5f42d27c0770\") " Jan 28 17:00:51 crc kubenswrapper[4877]: I0128 17:00:51.715775 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-db-sync-config-data\") pod \"534e973d-d29f-4aac-8922-5f42d27c0770\" (UID: \"534e973d-d29f-4aac-8922-5f42d27c0770\") " Jan 28 17:00:51 crc kubenswrapper[4877]: I0128 17:00:51.724808 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/534e973d-d29f-4aac-8922-5f42d27c0770-kube-api-access-9zvpx" (OuterVolumeSpecName: "kube-api-access-9zvpx") pod "534e973d-d29f-4aac-8922-5f42d27c0770" (UID: "534e973d-d29f-4aac-8922-5f42d27c0770"). InnerVolumeSpecName "kube-api-access-9zvpx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:00:51 crc kubenswrapper[4877]: I0128 17:00:51.725674 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "534e973d-d29f-4aac-8922-5f42d27c0770" (UID: "534e973d-d29f-4aac-8922-5f42d27c0770"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:00:51 crc kubenswrapper[4877]: I0128 17:00:51.749441 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "534e973d-d29f-4aac-8922-5f42d27c0770" (UID: "534e973d-d29f-4aac-8922-5f42d27c0770"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:00:51 crc kubenswrapper[4877]: I0128 17:00:51.775554 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-config-data" (OuterVolumeSpecName: "config-data") pod "534e973d-d29f-4aac-8922-5f42d27c0770" (UID: "534e973d-d29f-4aac-8922-5f42d27c0770"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:00:51 crc kubenswrapper[4877]: I0128 17:00:51.818016 4877 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:00:51 crc kubenswrapper[4877]: I0128 17:00:51.818055 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:00:51 crc kubenswrapper[4877]: I0128 17:00:51.818068 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zvpx\" (UniqueName: \"kubernetes.io/projected/534e973d-d29f-4aac-8922-5f42d27c0770-kube-api-access-9zvpx\") on node \"crc\" DevicePath \"\"" Jan 28 17:00:51 crc kubenswrapper[4877]: I0128 17:00:51.818099 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/534e973d-d29f-4aac-8922-5f42d27c0770-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:00:51 crc kubenswrapper[4877]: I0128 17:00:51.868601 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lptvh" event={"ID":"534e973d-d29f-4aac-8922-5f42d27c0770","Type":"ContainerDied","Data":"6b9c06660e5730d7b9e6e3cee148ca7da69c2214fb29f2bfd6e619aa75b182e0"} Jan 28 17:00:51 crc kubenswrapper[4877]: I0128 17:00:51.868641 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lptvh" Jan 28 17:00:51 crc kubenswrapper[4877]: I0128 17:00:51.868643 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b9c06660e5730d7b9e6e3cee148ca7da69c2214fb29f2bfd6e619aa75b182e0" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.243369 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-lsxs2" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.337816 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6htxs\" (UniqueName: \"kubernetes.io/projected/a32bcea9-5341-40a1-9715-a43829459366-kube-api-access-6htxs\") pod \"a32bcea9-5341-40a1-9715-a43829459366\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.338648 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-credential-keys\") pod \"a32bcea9-5341-40a1-9715-a43829459366\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.338681 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-fernet-keys\") pod \"a32bcea9-5341-40a1-9715-a43829459366\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.338728 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-combined-ca-bundle\") pod \"a32bcea9-5341-40a1-9715-a43829459366\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.338839 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-scripts\") pod \"a32bcea9-5341-40a1-9715-a43829459366\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.338968 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-config-data\") pod \"a32bcea9-5341-40a1-9715-a43829459366\" (UID: \"a32bcea9-5341-40a1-9715-a43829459366\") " Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.349947 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "a32bcea9-5341-40a1-9715-a43829459366" (UID: "a32bcea9-5341-40a1-9715-a43829459366"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.352323 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a32bcea9-5341-40a1-9715-a43829459366-kube-api-access-6htxs" (OuterVolumeSpecName: "kube-api-access-6htxs") pod "a32bcea9-5341-40a1-9715-a43829459366" (UID: "a32bcea9-5341-40a1-9715-a43829459366"). InnerVolumeSpecName "kube-api-access-6htxs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.353758 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "a32bcea9-5341-40a1-9715-a43829459366" (UID: "a32bcea9-5341-40a1-9715-a43829459366"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.373233 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-scripts" (OuterVolumeSpecName: "scripts") pod "a32bcea9-5341-40a1-9715-a43829459366" (UID: "a32bcea9-5341-40a1-9715-a43829459366"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.394007 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-b72zf"] Jan 28 17:00:52 crc kubenswrapper[4877]: E0128 17:00:52.397643 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a32bcea9-5341-40a1-9715-a43829459366" containerName="keystone-bootstrap" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.397685 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="a32bcea9-5341-40a1-9715-a43829459366" containerName="keystone-bootstrap" Jan 28 17:00:52 crc kubenswrapper[4877]: E0128 17:00:52.397709 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16116e77-e71d-41b1-a821-2c01fbbd71ae" containerName="dnsmasq-dns" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.397718 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="16116e77-e71d-41b1-a821-2c01fbbd71ae" containerName="dnsmasq-dns" Jan 28 17:00:52 crc kubenswrapper[4877]: E0128 17:00:52.397742 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="534e973d-d29f-4aac-8922-5f42d27c0770" containerName="glance-db-sync" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.397751 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="534e973d-d29f-4aac-8922-5f42d27c0770" containerName="glance-db-sync" Jan 28 17:00:52 crc kubenswrapper[4877]: E0128 17:00:52.397766 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54351f81-b326-424d-8061-5108152ce046" containerName="collect-profiles" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.397774 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="54351f81-b326-424d-8061-5108152ce046" containerName="collect-profiles" Jan 28 17:00:52 crc kubenswrapper[4877]: E0128 17:00:52.397789 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16116e77-e71d-41b1-a821-2c01fbbd71ae" containerName="init" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.397799 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="16116e77-e71d-41b1-a821-2c01fbbd71ae" containerName="init" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.398073 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="534e973d-d29f-4aac-8922-5f42d27c0770" containerName="glance-db-sync" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.398095 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="54351f81-b326-424d-8061-5108152ce046" containerName="collect-profiles" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.398117 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="16116e77-e71d-41b1-a821-2c01fbbd71ae" containerName="dnsmasq-dns" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.398133 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="a32bcea9-5341-40a1-9715-a43829459366" containerName="keystone-bootstrap" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.408554 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/dnsmasq-dns-8b5c85b87-b72zf"] Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.408663 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.433061 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a32bcea9-5341-40a1-9715-a43829459366" (UID: "a32bcea9-5341-40a1-9715-a43829459366"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.443246 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.443292 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6htxs\" (UniqueName: \"kubernetes.io/projected/a32bcea9-5341-40a1-9715-a43829459366-kube-api-access-6htxs\") on node \"crc\" DevicePath \"\"" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.443307 4877 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.443319 4877 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.443331 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.448731 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-config-data" (OuterVolumeSpecName: "config-data") pod "a32bcea9-5341-40a1-9715-a43829459366" (UID: "a32bcea9-5341-40a1-9715-a43829459366"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.546376 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.546500 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-config\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.546623 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.546688 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nm5hl\" (UniqueName: \"kubernetes.io/projected/9efc1e14-125d-467c-ae5e-b124a75d455d-kube-api-access-nm5hl\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.546807 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.546860 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.546936 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a32bcea9-5341-40a1-9715-a43829459366-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.651604 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.651737 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nm5hl\" (UniqueName: \"kubernetes.io/projected/9efc1e14-125d-467c-ae5e-b124a75d455d-kube-api-access-nm5hl\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc 
kubenswrapper[4877]: I0128 17:00:52.651893 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.651953 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.652053 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.652147 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-config\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.652995 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.653094 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-config\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.653605 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.654363 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.654956 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.688359 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nm5hl\" 
(UniqueName: \"kubernetes.io/projected/9efc1e14-125d-467c-ae5e-b124a75d455d-kube-api-access-nm5hl\") pod \"dnsmasq-dns-8b5c85b87-b72zf\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") " pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.767293 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.912466 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lsxs2" event={"ID":"a32bcea9-5341-40a1-9715-a43829459366","Type":"ContainerDied","Data":"462633fe9da6ae76b21473263dbe2a390c9a5b60067eaf35f2405466f18bcce7"} Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.912547 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="462633fe9da6ae76b21473263dbe2a390c9a5b60067eaf35f2405466f18bcce7" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.912567 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lsxs2" Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.995647 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-85cd7458d6-gb267"] Jan 28 17:00:52 crc kubenswrapper[4877]: I0128 17:00:52.997758 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.000029 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.002410 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.003922 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.004109 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-45ggb" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.004836 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.004986 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.024237 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-85cd7458d6-gb267"] Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.170851 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-scripts\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.170933 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-fernet-keys\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.170957 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-credential-keys\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.170974 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-config-data\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.171253 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmjvb\" (UniqueName: \"kubernetes.io/projected/dfdeacfe-509f-4906-9764-8b325a80ad03-kube-api-access-hmjvb\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.171323 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-combined-ca-bundle\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.171542 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-internal-tls-certs\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.171602 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-public-tls-certs\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.270953 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.273267 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.275574 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.277118 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.277293 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-jptj6" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.277782 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-scripts\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.277864 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-fernet-keys\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.277892 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-credential-keys\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.277915 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-config-data\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.278038 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmjvb\" (UniqueName: \"kubernetes.io/projected/dfdeacfe-509f-4906-9764-8b325a80ad03-kube-api-access-hmjvb\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.278079 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-combined-ca-bundle\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.278186 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-internal-tls-certs\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.278216 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-public-tls-certs\") pod \"keystone-85cd7458d6-gb267\" (UID: 
\"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.285671 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.286151 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-fernet-keys\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.294040 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-public-tls-certs\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.297290 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-combined-ca-bundle\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.299176 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-credential-keys\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.300417 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmjvb\" (UniqueName: \"kubernetes.io/projected/dfdeacfe-509f-4906-9764-8b325a80ad03-kube-api-access-hmjvb\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.313390 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-scripts\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.314778 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-internal-tls-certs\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.321037 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfdeacfe-509f-4906-9764-8b325a80ad03-config-data\") pod \"keystone-85cd7458d6-gb267\" (UID: \"dfdeacfe-509f-4906-9764-8b325a80ad03\") " pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.339152 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.339515 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:00:53 crc kubenswrapper[4877]: E0128 17:00:53.341143 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.381795 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-scripts\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.381904 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-config-data\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.381981 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.382042 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.382069 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d4a2b524-6312-4ae9-9970-bc4865c5de49-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.382088 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4a2b524-6312-4ae9-9970-bc4865c5de49-logs\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.382301 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6l8q\" (UniqueName: \"kubernetes.io/projected/d4a2b524-6312-4ae9-9970-bc4865c5de49-kube-api-access-q6l8q\") pod \"glance-default-external-api-0\" (UID: 
\"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.413495 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-b72zf"] Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.484147 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.484575 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.484614 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d4a2b524-6312-4ae9-9970-bc4865c5de49-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.484643 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4a2b524-6312-4ae9-9970-bc4865c5de49-logs\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.484795 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6l8q\" (UniqueName: \"kubernetes.io/projected/d4a2b524-6312-4ae9-9970-bc4865c5de49-kube-api-access-q6l8q\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.484917 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-scripts\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.484962 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-config-data\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.486095 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4a2b524-6312-4ae9-9970-bc4865c5de49-logs\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.486570 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/d4a2b524-6312-4ae9-9970-bc4865c5de49-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.516151 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.517262 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-config-data\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.517857 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.517904 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/5992022521d9a8a870a78abda3cb3974fc8658c66623b3cbfa6fe8c84dc59df6/globalmount\"" pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.589502 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-scripts\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.593897 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.633562 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.633687 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.651806 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.668912 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.678595 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6l8q\" (UniqueName: \"kubernetes.io/projected/d4a2b524-6312-4ae9-9970-bc4865c5de49-kube-api-access-q6l8q\") pod \"glance-default-external-api-0\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") " pod="openstack/glance-default-external-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.713039 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/09aef551-5a7b-422f-bcc8-643001c12655-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.713171 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09aef551-5a7b-422f-bcc8-643001c12655-logs\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.713194 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wjdc\" (UniqueName: \"kubernetes.io/projected/09aef551-5a7b-422f-bcc8-643001c12655-kube-api-access-7wjdc\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.713238 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-config-data\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.715857 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.715971 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-scripts\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc 
kubenswrapper[4877]: I0128 17:00:53.716669 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.822887 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/09aef551-5a7b-422f-bcc8-643001c12655-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.823258 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09aef551-5a7b-422f-bcc8-643001c12655-logs\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.823279 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wjdc\" (UniqueName: \"kubernetes.io/projected/09aef551-5a7b-422f-bcc8-643001c12655-kube-api-access-7wjdc\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.823844 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-config-data\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.823936 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.824027 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-scripts\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.824069 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.824204 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/09aef551-5a7b-422f-bcc8-643001c12655-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: 
I0128 17:00:53.825167 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09aef551-5a7b-422f-bcc8-643001c12655-logs\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.828351 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.828388 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/c970fd9b051e1e4708c5b978fbab2178e7872a320f8059867f5cd332a890e640/globalmount\"" pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.830659 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-config-data\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.840124 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-scripts\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.846322 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.847228 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wjdc\" (UniqueName: \"kubernetes.io/projected/09aef551-5a7b-422f-bcc8-643001c12655-kube-api-access-7wjdc\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.875413 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") pod \"glance-default-internal-api-0\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.892080 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-85cd7458d6-gb267"] Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.932784 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-957zv" event={"ID":"6becdddb-915e-40e0-ba03-9de124ad56c7","Type":"ContainerStarted","Data":"d67867eb4e420db2210dc7a867be431ead9a5ee36c8f4aeb0e89367ae7b38663"} Jan 28 17:00:53 
crc kubenswrapper[4877]: I0128 17:00:53.936466 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" event={"ID":"9efc1e14-125d-467c-ae5e-b124a75d455d","Type":"ContainerStarted","Data":"2d24dbd728ef68cd283ebc5c21bcb772db95c6997cb3ec57ea1145b1b78f2211"} Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.939926 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-85cd7458d6-gb267" event={"ID":"dfdeacfe-509f-4906-9764-8b325a80ad03","Type":"ContainerStarted","Data":"1a0ea21175d08bf3d57dcca74eb50bad1ba0e160bbe018ed2d5ab5dd54000afd"} Jan 28 17:00:53 crc kubenswrapper[4877]: I0128 17:00:53.965212 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 17:00:54 crc kubenswrapper[4877]: I0128 17:00:54.073135 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 17:00:54 crc kubenswrapper[4877]: I0128 17:00:54.669209 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-957zv" podStartSLOduration=4.71583314 podStartE2EDuration="1m4.669188121s" podCreationTimestamp="2026-01-28 16:59:50 +0000 UTC" firstStartedPulling="2026-01-28 16:59:52.17567371 +0000 UTC m=+1495.734000598" lastFinishedPulling="2026-01-28 17:00:52.129028691 +0000 UTC m=+1555.687355579" observedRunningTime="2026-01-28 17:00:53.951123364 +0000 UTC m=+1557.509450252" watchObservedRunningTime="2026-01-28 17:00:54.669188121 +0000 UTC m=+1558.227515009" Jan 28 17:00:54 crc kubenswrapper[4877]: I0128 17:00:54.689705 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 17:00:54 crc kubenswrapper[4877]: W0128 17:00:54.712698 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd4a2b524_6312_4ae9_9970_bc4865c5de49.slice/crio-5e1440e6c9c49c10021f2d66cc11b8595fd04c9fa3325b42f02594c61dd076bd WatchSource:0}: Error finding container 5e1440e6c9c49c10021f2d66cc11b8595fd04c9fa3325b42f02594c61dd076bd: Status 404 returned error can't find the container with id 5e1440e6c9c49c10021f2d66cc11b8595fd04c9fa3325b42f02594c61dd076bd Jan 28 17:00:54 crc kubenswrapper[4877]: I0128 17:00:54.778940 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 17:00:54 crc kubenswrapper[4877]: I0128 17:00:54.953286 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"09aef551-5a7b-422f-bcc8-643001c12655","Type":"ContainerStarted","Data":"7522b252bf57c8296f440595822e271d56a825e057b411c81b1ca8574314f2a4"} Jan 28 17:00:54 crc kubenswrapper[4877]: I0128 17:00:54.962831 4877 generic.go:334] "Generic (PLEG): container finished" podID="9efc1e14-125d-467c-ae5e-b124a75d455d" containerID="dcba3110f33020969dd4d3c1084f8ff4351539536854437c38dac9d7ad44b285" exitCode=0 Jan 28 17:00:54 crc kubenswrapper[4877]: I0128 17:00:54.962929 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" event={"ID":"9efc1e14-125d-467c-ae5e-b124a75d455d","Type":"ContainerDied","Data":"dcba3110f33020969dd4d3c1084f8ff4351539536854437c38dac9d7ad44b285"} Jan 28 17:00:54 crc kubenswrapper[4877]: I0128 17:00:54.964776 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"d4a2b524-6312-4ae9-9970-bc4865c5de49","Type":"ContainerStarted","Data":"5e1440e6c9c49c10021f2d66cc11b8595fd04c9fa3325b42f02594c61dd076bd"} Jan 28 17:00:54 crc kubenswrapper[4877]: I0128 17:00:54.968626 4877 generic.go:334] "Generic (PLEG): container finished" podID="ffc3b7f9-8297-46ac-b550-d61d9513187c" containerID="2a6bcb2c9bee3ea8986746fd04aa1f77232ae6fe9b206c22bb8a12505700f928" exitCode=0 Jan 28 17:00:54 crc kubenswrapper[4877]: I0128 17:00:54.968728 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-6hfcz" event={"ID":"ffc3b7f9-8297-46ac-b550-d61d9513187c","Type":"ContainerDied","Data":"2a6bcb2c9bee3ea8986746fd04aa1f77232ae6fe9b206c22bb8a12505700f928"} Jan 28 17:00:54 crc kubenswrapper[4877]: I0128 17:00:54.972009 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-85cd7458d6-gb267" event={"ID":"dfdeacfe-509f-4906-9764-8b325a80ad03","Type":"ContainerStarted","Data":"61a2c10df5cf589d669d3bf4b710172842260c5571e9a400e349213665aabd99"} Jan 28 17:00:54 crc kubenswrapper[4877]: I0128 17:00:54.972354 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:00:55 crc kubenswrapper[4877]: I0128 17:00:55.010664 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-85cd7458d6-gb267" podStartSLOduration=3.010641868 podStartE2EDuration="3.010641868s" podCreationTimestamp="2026-01-28 17:00:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:00:55.00993683 +0000 UTC m=+1558.568263718" watchObservedRunningTime="2026-01-28 17:00:55.010641868 +0000 UTC m=+1558.568968756" Jan 28 17:00:55 crc kubenswrapper[4877]: E0128 17:00:55.346730 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-kfq2d" podUID="dcc64de1-66f2-48e6-969f-61aa68773678" Jan 28 17:00:55 crc kubenswrapper[4877]: I0128 17:00:55.515316 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 17:00:55 crc kubenswrapper[4877]: I0128 17:00:55.650087 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 17:00:55 crc kubenswrapper[4877]: I0128 17:00:55.996059 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"09aef551-5a7b-422f-bcc8-643001c12655","Type":"ContainerStarted","Data":"bdab1fa2229e7b13c887a1801b8b7c2e351be246fe974453360e248a5cd0304a"} Jan 28 17:00:55 crc kubenswrapper[4877]: I0128 17:00:55.998820 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" event={"ID":"9efc1e14-125d-467c-ae5e-b124a75d455d","Type":"ContainerStarted","Data":"05d8953cd46e2af9ce5e6a4072e52fba4ea4dc43c96e52aaffbe958d3da257f2"} Jan 28 17:00:56 crc kubenswrapper[4877]: I0128 17:00:56.000171 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:00:56 crc kubenswrapper[4877]: I0128 17:00:56.008966 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"d4a2b524-6312-4ae9-9970-bc4865c5de49","Type":"ContainerStarted","Data":"d7cf7ce98aae3591d2f78feff522feffc40894ac2b964211bbfe73f277fe099b"} Jan 28 17:00:56 crc kubenswrapper[4877]: I0128 17:00:56.043435 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" podStartSLOduration=4.0434137549999996 podStartE2EDuration="4.043413755s" podCreationTimestamp="2026-01-28 17:00:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:00:56.021183398 +0000 UTC m=+1559.579510306" watchObservedRunningTime="2026-01-28 17:00:56.043413755 +0000 UTC m=+1559.601740643" Jan 28 17:00:57 crc kubenswrapper[4877]: I0128 17:00:57.025922 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"09aef551-5a7b-422f-bcc8-643001c12655","Type":"ContainerStarted","Data":"5ba065f00ddfb7e36708d90963c3b63296bb3bbf36bcb0489160cc962d59b1ca"} Jan 28 17:00:57 crc kubenswrapper[4877]: I0128 17:00:57.026000 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="09aef551-5a7b-422f-bcc8-643001c12655" containerName="glance-log" containerID="cri-o://bdab1fa2229e7b13c887a1801b8b7c2e351be246fe974453360e248a5cd0304a" gracePeriod=30 Jan 28 17:00:57 crc kubenswrapper[4877]: I0128 17:00:57.026801 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="09aef551-5a7b-422f-bcc8-643001c12655" containerName="glance-httpd" containerID="cri-o://5ba065f00ddfb7e36708d90963c3b63296bb3bbf36bcb0489160cc962d59b1ca" gracePeriod=30 Jan 28 17:00:57 crc kubenswrapper[4877]: I0128 17:00:57.031049 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d4a2b524-6312-4ae9-9970-bc4865c5de49","Type":"ContainerStarted","Data":"682f25a923da008daf547ad833353682e1c2deb587b8de5c951e6a174942fbac"} Jan 28 17:00:57 crc kubenswrapper[4877]: I0128 17:00:57.031133 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="d4a2b524-6312-4ae9-9970-bc4865c5de49" containerName="glance-log" containerID="cri-o://d7cf7ce98aae3591d2f78feff522feffc40894ac2b964211bbfe73f277fe099b" gracePeriod=30 Jan 28 17:00:57 crc kubenswrapper[4877]: I0128 17:00:57.031329 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="d4a2b524-6312-4ae9-9970-bc4865c5de49" containerName="glance-httpd" containerID="cri-o://682f25a923da008daf547ad833353682e1c2deb587b8de5c951e6a174942fbac" gracePeriod=30 Jan 28 17:00:57 crc kubenswrapper[4877]: I0128 17:00:57.059112 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.059091033 podStartE2EDuration="5.059091033s" podCreationTimestamp="2026-01-28 17:00:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:00:57.049685791 +0000 UTC m=+1560.608012689" watchObservedRunningTime="2026-01-28 17:00:57.059091033 +0000 UTC m=+1560.617417921" Jan 28 17:00:57 crc kubenswrapper[4877]: I0128 17:00:57.082810 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" 
podStartSLOduration=5.082786319 podStartE2EDuration="5.082786319s" podCreationTimestamp="2026-01-28 17:00:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:00:57.074311782 +0000 UTC m=+1560.632638690" watchObservedRunningTime="2026-01-28 17:00:57.082786319 +0000 UTC m=+1560.641113207" Jan 28 17:00:57 crc kubenswrapper[4877]: I0128 17:00:57.882610 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vg4fw"] Jan 28 17:00:57 crc kubenswrapper[4877]: I0128 17:00:57.885253 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vg4fw" Jan 28 17:00:57 crc kubenswrapper[4877]: I0128 17:00:57.899156 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vg4fw"] Jan 28 17:00:57 crc kubenswrapper[4877]: I0128 17:00:57.942849 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fad3a099-fcee-4a91-9de7-c67834a9743c-catalog-content\") pod \"redhat-marketplace-vg4fw\" (UID: \"fad3a099-fcee-4a91-9de7-c67834a9743c\") " pod="openshift-marketplace/redhat-marketplace-vg4fw" Jan 28 17:00:57 crc kubenswrapper[4877]: I0128 17:00:57.942922 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dm59h\" (UniqueName: \"kubernetes.io/projected/fad3a099-fcee-4a91-9de7-c67834a9743c-kube-api-access-dm59h\") pod \"redhat-marketplace-vg4fw\" (UID: \"fad3a099-fcee-4a91-9de7-c67834a9743c\") " pod="openshift-marketplace/redhat-marketplace-vg4fw" Jan 28 17:00:57 crc kubenswrapper[4877]: I0128 17:00:57.943300 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fad3a099-fcee-4a91-9de7-c67834a9743c-utilities\") pod \"redhat-marketplace-vg4fw\" (UID: \"fad3a099-fcee-4a91-9de7-c67834a9743c\") " pod="openshift-marketplace/redhat-marketplace-vg4fw" Jan 28 17:00:58 crc kubenswrapper[4877]: I0128 17:00:58.045041 4877 generic.go:334] "Generic (PLEG): container finished" podID="d4a2b524-6312-4ae9-9970-bc4865c5de49" containerID="682f25a923da008daf547ad833353682e1c2deb587b8de5c951e6a174942fbac" exitCode=0 Jan 28 17:00:58 crc kubenswrapper[4877]: I0128 17:00:58.045401 4877 generic.go:334] "Generic (PLEG): container finished" podID="d4a2b524-6312-4ae9-9970-bc4865c5de49" containerID="d7cf7ce98aae3591d2f78feff522feffc40894ac2b964211bbfe73f277fe099b" exitCode=143 Jan 28 17:00:58 crc kubenswrapper[4877]: I0128 17:00:58.045142 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d4a2b524-6312-4ae9-9970-bc4865c5de49","Type":"ContainerDied","Data":"682f25a923da008daf547ad833353682e1c2deb587b8de5c951e6a174942fbac"} Jan 28 17:00:58 crc kubenswrapper[4877]: I0128 17:00:58.045492 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d4a2b524-6312-4ae9-9970-bc4865c5de49","Type":"ContainerDied","Data":"d7cf7ce98aae3591d2f78feff522feffc40894ac2b964211bbfe73f277fe099b"} Jan 28 17:00:58 crc kubenswrapper[4877]: I0128 17:00:58.045261 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fad3a099-fcee-4a91-9de7-c67834a9743c-utilities\") pod 
\"redhat-marketplace-vg4fw\" (UID: \"fad3a099-fcee-4a91-9de7-c67834a9743c\") " pod="openshift-marketplace/redhat-marketplace-vg4fw" Jan 28 17:00:58 crc kubenswrapper[4877]: I0128 17:00:58.045830 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fad3a099-fcee-4a91-9de7-c67834a9743c-catalog-content\") pod \"redhat-marketplace-vg4fw\" (UID: \"fad3a099-fcee-4a91-9de7-c67834a9743c\") " pod="openshift-marketplace/redhat-marketplace-vg4fw" Jan 28 17:00:58 crc kubenswrapper[4877]: I0128 17:00:58.045890 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dm59h\" (UniqueName: \"kubernetes.io/projected/fad3a099-fcee-4a91-9de7-c67834a9743c-kube-api-access-dm59h\") pod \"redhat-marketplace-vg4fw\" (UID: \"fad3a099-fcee-4a91-9de7-c67834a9743c\") " pod="openshift-marketplace/redhat-marketplace-vg4fw" Jan 28 17:00:58 crc kubenswrapper[4877]: I0128 17:00:58.045956 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fad3a099-fcee-4a91-9de7-c67834a9743c-utilities\") pod \"redhat-marketplace-vg4fw\" (UID: \"fad3a099-fcee-4a91-9de7-c67834a9743c\") " pod="openshift-marketplace/redhat-marketplace-vg4fw" Jan 28 17:00:58 crc kubenswrapper[4877]: I0128 17:00:58.046280 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fad3a099-fcee-4a91-9de7-c67834a9743c-catalog-content\") pod \"redhat-marketplace-vg4fw\" (UID: \"fad3a099-fcee-4a91-9de7-c67834a9743c\") " pod="openshift-marketplace/redhat-marketplace-vg4fw" Jan 28 17:00:58 crc kubenswrapper[4877]: I0128 17:00:58.050972 4877 generic.go:334] "Generic (PLEG): container finished" podID="09aef551-5a7b-422f-bcc8-643001c12655" containerID="5ba065f00ddfb7e36708d90963c3b63296bb3bbf36bcb0489160cc962d59b1ca" exitCode=0 Jan 28 17:00:58 crc kubenswrapper[4877]: I0128 17:00:58.051013 4877 generic.go:334] "Generic (PLEG): container finished" podID="09aef551-5a7b-422f-bcc8-643001c12655" containerID="bdab1fa2229e7b13c887a1801b8b7c2e351be246fe974453360e248a5cd0304a" exitCode=143 Jan 28 17:00:58 crc kubenswrapper[4877]: I0128 17:00:58.051009 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"09aef551-5a7b-422f-bcc8-643001c12655","Type":"ContainerDied","Data":"5ba065f00ddfb7e36708d90963c3b63296bb3bbf36bcb0489160cc962d59b1ca"} Jan 28 17:00:58 crc kubenswrapper[4877]: I0128 17:00:58.051060 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"09aef551-5a7b-422f-bcc8-643001c12655","Type":"ContainerDied","Data":"bdab1fa2229e7b13c887a1801b8b7c2e351be246fe974453360e248a5cd0304a"} Jan 28 17:00:58 crc kubenswrapper[4877]: I0128 17:00:58.082839 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dm59h\" (UniqueName: \"kubernetes.io/projected/fad3a099-fcee-4a91-9de7-c67834a9743c-kube-api-access-dm59h\") pod \"redhat-marketplace-vg4fw\" (UID: \"fad3a099-fcee-4a91-9de7-c67834a9743c\") " pod="openshift-marketplace/redhat-marketplace-vg4fw" Jan 28 17:00:58 crc kubenswrapper[4877]: I0128 17:00:58.204936 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vg4fw" Jan 28 17:01:00 crc kubenswrapper[4877]: I0128 17:01:00.134012 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29493661-94ck2"] Jan 28 17:01:00 crc kubenswrapper[4877]: I0128 17:01:00.136005 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29493661-94ck2" Jan 28 17:01:00 crc kubenswrapper[4877]: I0128 17:01:00.146951 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29493661-94ck2"] Jan 28 17:01:00 crc kubenswrapper[4877]: I0128 17:01:00.233364 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gq2f6\" (UniqueName: \"kubernetes.io/projected/ece13606-7152-4723-a482-b27d1cf022d5-kube-api-access-gq2f6\") pod \"keystone-cron-29493661-94ck2\" (UID: \"ece13606-7152-4723-a482-b27d1cf022d5\") " pod="openstack/keystone-cron-29493661-94ck2" Jan 28 17:01:00 crc kubenswrapper[4877]: I0128 17:01:00.233748 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-fernet-keys\") pod \"keystone-cron-29493661-94ck2\" (UID: \"ece13606-7152-4723-a482-b27d1cf022d5\") " pod="openstack/keystone-cron-29493661-94ck2" Jan 28 17:01:00 crc kubenswrapper[4877]: I0128 17:01:00.233859 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-config-data\") pod \"keystone-cron-29493661-94ck2\" (UID: \"ece13606-7152-4723-a482-b27d1cf022d5\") " pod="openstack/keystone-cron-29493661-94ck2" Jan 28 17:01:00 crc kubenswrapper[4877]: I0128 17:01:00.233965 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-combined-ca-bundle\") pod \"keystone-cron-29493661-94ck2\" (UID: \"ece13606-7152-4723-a482-b27d1cf022d5\") " pod="openstack/keystone-cron-29493661-94ck2" Jan 28 17:01:00 crc kubenswrapper[4877]: I0128 17:01:00.335653 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-combined-ca-bundle\") pod \"keystone-cron-29493661-94ck2\" (UID: \"ece13606-7152-4723-a482-b27d1cf022d5\") " pod="openstack/keystone-cron-29493661-94ck2" Jan 28 17:01:00 crc kubenswrapper[4877]: I0128 17:01:00.335825 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gq2f6\" (UniqueName: \"kubernetes.io/projected/ece13606-7152-4723-a482-b27d1cf022d5-kube-api-access-gq2f6\") pod \"keystone-cron-29493661-94ck2\" (UID: \"ece13606-7152-4723-a482-b27d1cf022d5\") " pod="openstack/keystone-cron-29493661-94ck2" Jan 28 17:01:00 crc kubenswrapper[4877]: I0128 17:01:00.335888 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-fernet-keys\") pod \"keystone-cron-29493661-94ck2\" (UID: \"ece13606-7152-4723-a482-b27d1cf022d5\") " pod="openstack/keystone-cron-29493661-94ck2" Jan 28 17:01:00 crc kubenswrapper[4877]: I0128 17:01:00.336076 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-config-data\") pod \"keystone-cron-29493661-94ck2\" (UID: \"ece13606-7152-4723-a482-b27d1cf022d5\") " pod="openstack/keystone-cron-29493661-94ck2" Jan 28 17:01:00 crc kubenswrapper[4877]: I0128 17:01:00.356731 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-config-data\") pod \"keystone-cron-29493661-94ck2\" (UID: \"ece13606-7152-4723-a482-b27d1cf022d5\") " pod="openstack/keystone-cron-29493661-94ck2" Jan 28 17:01:00 crc kubenswrapper[4877]: I0128 17:01:00.357008 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-combined-ca-bundle\") pod \"keystone-cron-29493661-94ck2\" (UID: \"ece13606-7152-4723-a482-b27d1cf022d5\") " pod="openstack/keystone-cron-29493661-94ck2" Jan 28 17:01:00 crc kubenswrapper[4877]: I0128 17:01:00.358288 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-fernet-keys\") pod \"keystone-cron-29493661-94ck2\" (UID: \"ece13606-7152-4723-a482-b27d1cf022d5\") " pod="openstack/keystone-cron-29493661-94ck2" Jan 28 17:01:00 crc kubenswrapper[4877]: I0128 17:01:00.363762 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gq2f6\" (UniqueName: \"kubernetes.io/projected/ece13606-7152-4723-a482-b27d1cf022d5-kube-api-access-gq2f6\") pod \"keystone-cron-29493661-94ck2\" (UID: \"ece13606-7152-4723-a482-b27d1cf022d5\") " pod="openstack/keystone-cron-29493661-94ck2" Jan 28 17:01:00 crc kubenswrapper[4877]: I0128 17:01:00.652271 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29493661-94ck2" Jan 28 17:01:02 crc kubenswrapper[4877]: I0128 17:01:02.769843 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" Jan 28 17:01:02 crc kubenswrapper[4877]: I0128 17:01:02.906993 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-ktcdh"] Jan 28 17:01:02 crc kubenswrapper[4877]: I0128 17:01:02.907737 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" podUID="63b3e104-e1f3-462d-98b7-4ab4f679b619" containerName="dnsmasq-dns" containerID="cri-o://0bbd9f95078ca6f1c48588dcaa6f31a46d7683d350f5f9c90273b0e7e94db9dd" gracePeriod=10 Jan 28 17:01:04 crc kubenswrapper[4877]: I0128 17:01:04.116701 4877 generic.go:334] "Generic (PLEG): container finished" podID="63b3e104-e1f3-462d-98b7-4ab4f679b619" containerID="0bbd9f95078ca6f1c48588dcaa6f31a46d7683d350f5f9c90273b0e7e94db9dd" exitCode=0 Jan 28 17:01:04 crc kubenswrapper[4877]: I0128 17:01:04.116754 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" event={"ID":"63b3e104-e1f3-462d-98b7-4ab4f679b619","Type":"ContainerDied","Data":"0bbd9f95078ca6f1c48588dcaa6f31a46d7683d350f5f9c90273b0e7e94db9dd"} Jan 28 17:01:05 crc kubenswrapper[4877]: I0128 17:01:05.869288 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-6hfcz"
Jan 28 17:01:05 crc kubenswrapper[4877]: I0128 17:01:05.934077 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc3b7f9-8297-46ac-b550-d61d9513187c-combined-ca-bundle\") pod \"ffc3b7f9-8297-46ac-b550-d61d9513187c\" (UID: \"ffc3b7f9-8297-46ac-b550-d61d9513187c\") "
Jan 28 17:01:05 crc kubenswrapper[4877]: I0128 17:01:05.934541 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r65vk\" (UniqueName: \"kubernetes.io/projected/ffc3b7f9-8297-46ac-b550-d61d9513187c-kube-api-access-r65vk\") pod \"ffc3b7f9-8297-46ac-b550-d61d9513187c\" (UID: \"ffc3b7f9-8297-46ac-b550-d61d9513187c\") "
Jan 28 17:01:05 crc kubenswrapper[4877]: I0128 17:01:05.934835 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ffc3b7f9-8297-46ac-b550-d61d9513187c-db-sync-config-data\") pod \"ffc3b7f9-8297-46ac-b550-d61d9513187c\" (UID: \"ffc3b7f9-8297-46ac-b550-d61d9513187c\") "
Jan 28 17:01:05 crc kubenswrapper[4877]: I0128 17:01:05.941909 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffc3b7f9-8297-46ac-b550-d61d9513187c-kube-api-access-r65vk" (OuterVolumeSpecName: "kube-api-access-r65vk") pod "ffc3b7f9-8297-46ac-b550-d61d9513187c" (UID: "ffc3b7f9-8297-46ac-b550-d61d9513187c"). InnerVolumeSpecName "kube-api-access-r65vk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:01:05 crc kubenswrapper[4877]: I0128 17:01:05.942646 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffc3b7f9-8297-46ac-b550-d61d9513187c-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "ffc3b7f9-8297-46ac-b550-d61d9513187c" (UID: "ffc3b7f9-8297-46ac-b550-d61d9513187c"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:05 crc kubenswrapper[4877]: I0128 17:01:05.980002 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffc3b7f9-8297-46ac-b550-d61d9513187c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ffc3b7f9-8297-46ac-b550-d61d9513187c" (UID: "ffc3b7f9-8297-46ac-b550-d61d9513187c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:06 crc kubenswrapper[4877]: I0128 17:01:06.037900 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r65vk\" (UniqueName: \"kubernetes.io/projected/ffc3b7f9-8297-46ac-b550-d61d9513187c-kube-api-access-r65vk\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:06 crc kubenswrapper[4877]: I0128 17:01:06.037953 4877 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ffc3b7f9-8297-46ac-b550-d61d9513187c-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:06 crc kubenswrapper[4877]: I0128 17:01:06.037964 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc3b7f9-8297-46ac-b550-d61d9513187c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:06 crc kubenswrapper[4877]: I0128 17:01:06.141193 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-6hfcz" event={"ID":"ffc3b7f9-8297-46ac-b550-d61d9513187c","Type":"ContainerDied","Data":"6fc9dc009ba949caaa21fd7a2282d34ab7b11e1eab2f481cf599584965113e86"}
Jan 28 17:01:06 crc kubenswrapper[4877]: I0128 17:01:06.141255 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6fc9dc009ba949caaa21fd7a2282d34ab7b11e1eab2f481cf599584965113e86"
Jan 28 17:01:06 crc kubenswrapper[4877]: I0128 17:01:06.141213 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-6hfcz"
Jan 28 17:01:06 crc kubenswrapper[4877]: I0128 17:01:06.334191 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82"
Jan 28 17:01:06 crc kubenswrapper[4877]: E0128 17:01:06.334611 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:01:06 crc kubenswrapper[4877]: I0128 17:01:06.380940 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" podUID="63b3e104-e1f3-462d-98b7-4ab4f679b619" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.182:5353: connect: connection refused"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.167520 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-766dd7f94c-557n6"]
Jan 28 17:01:07 crc kubenswrapper[4877]: E0128 17:01:07.168784 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffc3b7f9-8297-46ac-b550-d61d9513187c" containerName="barbican-db-sync"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.168807 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffc3b7f9-8297-46ac-b550-d61d9513187c" containerName="barbican-db-sync"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.169122 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffc3b7f9-8297-46ac-b550-d61d9513187c" containerName="barbican-db-sync"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.181789 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.191202 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.191620 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.195345 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-l6frx"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.216709 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-7984dc8994-gssff"]
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.219100 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.222737 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.263541 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-766dd7f94c-557n6"]
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.271008 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/87ac4c7d-c58b-4930-a244-3d49c02af801-config-data-custom\") pod \"barbican-worker-766dd7f94c-557n6\" (UID: \"87ac4c7d-c58b-4930-a244-3d49c02af801\") " pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.271172 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87ac4c7d-c58b-4930-a244-3d49c02af801-config-data\") pod \"barbican-worker-766dd7f94c-557n6\" (UID: \"87ac4c7d-c58b-4930-a244-3d49c02af801\") " pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.271260 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a24e6378-357f-4491-a769-e5e9cae6b549-logs\") pod \"barbican-keystone-listener-7984dc8994-gssff\" (UID: \"a24e6378-357f-4491-a769-e5e9cae6b549\") " pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.271301 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a24e6378-357f-4491-a769-e5e9cae6b549-combined-ca-bundle\") pod \"barbican-keystone-listener-7984dc8994-gssff\" (UID: \"a24e6378-357f-4491-a769-e5e9cae6b549\") " pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.271389 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tk97l\" (UniqueName: \"kubernetes.io/projected/87ac4c7d-c58b-4930-a244-3d49c02af801-kube-api-access-tk97l\") pod \"barbican-worker-766dd7f94c-557n6\" (UID: \"87ac4c7d-c58b-4930-a244-3d49c02af801\") " pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.271456 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a24e6378-357f-4491-a769-e5e9cae6b549-config-data\") pod \"barbican-keystone-listener-7984dc8994-gssff\" (UID: \"a24e6378-357f-4491-a769-e5e9cae6b549\") " pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.271518 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87ac4c7d-c58b-4930-a244-3d49c02af801-combined-ca-bundle\") pod \"barbican-worker-766dd7f94c-557n6\" (UID: \"87ac4c7d-c58b-4930-a244-3d49c02af801\") " pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.271547 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87ac4c7d-c58b-4930-a244-3d49c02af801-logs\") pod \"barbican-worker-766dd7f94c-557n6\" (UID: \"87ac4c7d-c58b-4930-a244-3d49c02af801\") " pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.271671 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a24e6378-357f-4491-a769-e5e9cae6b549-config-data-custom\") pod \"barbican-keystone-listener-7984dc8994-gssff\" (UID: \"a24e6378-357f-4491-a769-e5e9cae6b549\") " pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.271709 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llhrg\" (UniqueName: \"kubernetes.io/projected/a24e6378-357f-4491-a769-e5e9cae6b549-kube-api-access-llhrg\") pod \"barbican-keystone-listener-7984dc8994-gssff\" (UID: \"a24e6378-357f-4491-a769-e5e9cae6b549\") " pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.324551 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7984dc8994-gssff"]
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.388969 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a24e6378-357f-4491-a769-e5e9cae6b549-config-data\") pod \"barbican-keystone-listener-7984dc8994-gssff\" (UID: \"a24e6378-357f-4491-a769-e5e9cae6b549\") " pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.389098 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87ac4c7d-c58b-4930-a244-3d49c02af801-combined-ca-bundle\") pod \"barbican-worker-766dd7f94c-557n6\" (UID: \"87ac4c7d-c58b-4930-a244-3d49c02af801\") " pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.389172 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87ac4c7d-c58b-4930-a244-3d49c02af801-logs\") pod \"barbican-worker-766dd7f94c-557n6\" (UID: \"87ac4c7d-c58b-4930-a244-3d49c02af801\") " pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.389289 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a24e6378-357f-4491-a769-e5e9cae6b549-config-data-custom\") pod \"barbican-keystone-listener-7984dc8994-gssff\" (UID: \"a24e6378-357f-4491-a769-e5e9cae6b549\") " pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.389357 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llhrg\" (UniqueName: \"kubernetes.io/projected/a24e6378-357f-4491-a769-e5e9cae6b549-kube-api-access-llhrg\") pod \"barbican-keystone-listener-7984dc8994-gssff\" (UID: \"a24e6378-357f-4491-a769-e5e9cae6b549\") " pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.389419 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/87ac4c7d-c58b-4930-a244-3d49c02af801-config-data-custom\") pod \"barbican-worker-766dd7f94c-557n6\" (UID: \"87ac4c7d-c58b-4930-a244-3d49c02af801\") " pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.389512 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87ac4c7d-c58b-4930-a244-3d49c02af801-config-data\") pod \"barbican-worker-766dd7f94c-557n6\" (UID: \"87ac4c7d-c58b-4930-a244-3d49c02af801\") " pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.389572 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a24e6378-357f-4491-a769-e5e9cae6b549-logs\") pod \"barbican-keystone-listener-7984dc8994-gssff\" (UID: \"a24e6378-357f-4491-a769-e5e9cae6b549\") " pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.389631 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a24e6378-357f-4491-a769-e5e9cae6b549-combined-ca-bundle\") pod \"barbican-keystone-listener-7984dc8994-gssff\" (UID: \"a24e6378-357f-4491-a769-e5e9cae6b549\") " pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.389837 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tk97l\" (UniqueName: \"kubernetes.io/projected/87ac4c7d-c58b-4930-a244-3d49c02af801-kube-api-access-tk97l\") pod \"barbican-worker-766dd7f94c-557n6\" (UID: \"87ac4c7d-c58b-4930-a244-3d49c02af801\") " pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.396290 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87ac4c7d-c58b-4930-a244-3d49c02af801-logs\") pod \"barbican-worker-766dd7f94c-557n6\" (UID: \"87ac4c7d-c58b-4930-a244-3d49c02af801\") " pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.399894 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a24e6378-357f-4491-a769-e5e9cae6b549-logs\") pod \"barbican-keystone-listener-7984dc8994-gssff\" (UID: \"a24e6378-357f-4491-a769-e5e9cae6b549\") " pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.433082 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a24e6378-357f-4491-a769-e5e9cae6b549-config-data-custom\") pod \"barbican-keystone-listener-7984dc8994-gssff\" (UID: \"a24e6378-357f-4491-a769-e5e9cae6b549\") " pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.433360 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87ac4c7d-c58b-4930-a244-3d49c02af801-combined-ca-bundle\") pod \"barbican-worker-766dd7f94c-557n6\" (UID: \"87ac4c7d-c58b-4930-a244-3d49c02af801\") " pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.435131 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llhrg\" (UniqueName: \"kubernetes.io/projected/a24e6378-357f-4491-a769-e5e9cae6b549-kube-api-access-llhrg\") pod \"barbican-keystone-listener-7984dc8994-gssff\" (UID: \"a24e6378-357f-4491-a769-e5e9cae6b549\") " pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.441100 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a24e6378-357f-4491-a769-e5e9cae6b549-config-data\") pod \"barbican-keystone-listener-7984dc8994-gssff\" (UID: \"a24e6378-357f-4491-a769-e5e9cae6b549\") " pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.442510 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a24e6378-357f-4491-a769-e5e9cae6b549-combined-ca-bundle\") pod \"barbican-keystone-listener-7984dc8994-gssff\" (UID: \"a24e6378-357f-4491-a769-e5e9cae6b549\") " pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.443942 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tk97l\" (UniqueName: \"kubernetes.io/projected/87ac4c7d-c58b-4930-a244-3d49c02af801-kube-api-access-tk97l\") pod \"barbican-worker-766dd7f94c-557n6\" (UID: \"87ac4c7d-c58b-4930-a244-3d49c02af801\") " pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.449139 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87ac4c7d-c58b-4930-a244-3d49c02af801-config-data\") pod \"barbican-worker-766dd7f94c-557n6\" (UID: \"87ac4c7d-c58b-4930-a244-3d49c02af801\") " pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.451852 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/87ac4c7d-c58b-4930-a244-3d49c02af801-config-data-custom\") pod \"barbican-worker-766dd7f94c-557n6\" (UID: \"87ac4c7d-c58b-4930-a244-3d49c02af801\") " pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.570198 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7984dc8994-gssff"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.575396 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-766dd7f94c-557n6"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.673298 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-59d5ff467f-r26hn"]
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.696076 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.731119 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-dns-svc\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.731888 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-ovsdbserver-nb\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.732097 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-ovsdbserver-sb\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.732185 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-config\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.732501 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-dns-swift-storage-0\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.732728 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-df8cp\" (UniqueName: \"kubernetes.io/projected/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-kube-api-access-df8cp\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.777700 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59d5ff467f-r26hn"]
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.811654 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-94bdfbbc4-6n5dr"]
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.815887 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.834238 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.835060 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-94bdfbbc4-6n5dr"]
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.837241 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae4830b8-7964-4686-a784-e357a560ec78-logs\") pod \"barbican-api-94bdfbbc4-6n5dr\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") " pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.837287 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-dns-svc\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.837317 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-ovsdbserver-nb\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.837359 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-combined-ca-bundle\") pod \"barbican-api-94bdfbbc4-6n5dr\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") " pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.837381 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-ovsdbserver-sb\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.837406 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-config\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.837435 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnrn4\" (UniqueName: \"kubernetes.io/projected/ae4830b8-7964-4686-a784-e357a560ec78-kube-api-access-mnrn4\") pod \"barbican-api-94bdfbbc4-6n5dr\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") " pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.837533 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-dns-swift-storage-0\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.837570 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-config-data-custom\") pod \"barbican-api-94bdfbbc4-6n5dr\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") " pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.837617 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-df8cp\" (UniqueName: \"kubernetes.io/projected/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-kube-api-access-df8cp\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.837654 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-config-data\") pod \"barbican-api-94bdfbbc4-6n5dr\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") " pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.841549 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-config\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.841832 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-ovsdbserver-nb\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.842431 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-ovsdbserver-sb\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.842465 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-dns-swift-storage-0\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.845041 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-dns-svc\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.890421 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-df8cp\" (UniqueName: \"kubernetes.io/projected/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-kube-api-access-df8cp\") pod \"dnsmasq-dns-59d5ff467f-r26hn\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.943963 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-config-data-custom\") pod \"barbican-api-94bdfbbc4-6n5dr\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") " pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.944107 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-config-data\") pod \"barbican-api-94bdfbbc4-6n5dr\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") " pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.944197 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae4830b8-7964-4686-a784-e357a560ec78-logs\") pod \"barbican-api-94bdfbbc4-6n5dr\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") " pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.944275 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-combined-ca-bundle\") pod \"barbican-api-94bdfbbc4-6n5dr\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") " pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.944324 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnrn4\" (UniqueName: \"kubernetes.io/projected/ae4830b8-7964-4686-a784-e357a560ec78-kube-api-access-mnrn4\") pod \"barbican-api-94bdfbbc4-6n5dr\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") " pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.948190 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae4830b8-7964-4686-a784-e357a560ec78-logs\") pod \"barbican-api-94bdfbbc4-6n5dr\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") " pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.952687 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-combined-ca-bundle\") pod \"barbican-api-94bdfbbc4-6n5dr\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") " pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.955845 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-config-data\") pod \"barbican-api-94bdfbbc4-6n5dr\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") " pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.972846 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-config-data-custom\") pod \"barbican-api-94bdfbbc4-6n5dr\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") " pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:07 crc kubenswrapper[4877]: I0128 17:01:07.994364 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnrn4\" (UniqueName: \"kubernetes.io/projected/ae4830b8-7964-4686-a784-e357a560ec78-kube-api-access-mnrn4\") pod \"barbican-api-94bdfbbc4-6n5dr\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") " pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:08 crc kubenswrapper[4877]: I0128 17:01:08.085834 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:08 crc kubenswrapper[4877]: I0128 17:01:08.191866 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:08 crc kubenswrapper[4877]: E0128 17:01:08.756014 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/ubi9/httpd-24:latest"
Jan 28 17:01:08 crc kubenswrapper[4877]: E0128 17:01:08.756872 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:proxy-httpd,Image:registry.redhat.io/ubi9/httpd-24:latest,Command:[/usr/sbin/httpd],Args:[-DFOREGROUND],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:proxy-httpd,HostPort:0,ContainerPort:3000,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf/httpd.conf,SubPath:httpd.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf.d/ssl.conf,SubPath:ssl.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:run-httpd,ReadOnly:false,MountPath:/run/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:log-httpd,ReadOnly:false,MountPath:/var/log/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nrvjt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(de95a2d4-539f-46f1-abcc-fe8e46e404ee): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 28 17:01:08 crc kubenswrapper[4877]: E0128 17:01:08.758852 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"ceilometer-notification-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"proxy-httpd\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"]" pod="openstack/ceilometer-0" podUID="de95a2d4-539f-46f1-abcc-fe8e46e404ee"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.127124 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.149832 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.183889 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.195654 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") pod \"09aef551-5a7b-422f-bcc8-643001c12655\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.195776 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzxkv\" (UniqueName: \"kubernetes.io/projected/63b3e104-e1f3-462d-98b7-4ab4f679b619-kube-api-access-bzxkv\") pod \"63b3e104-e1f3-462d-98b7-4ab4f679b619\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.196261 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-ovsdbserver-sb\") pod \"63b3e104-e1f3-462d-98b7-4ab4f679b619\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.196346 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-dns-svc\") pod \"63b3e104-e1f3-462d-98b7-4ab4f679b619\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.196567 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-ovsdbserver-nb\") pod \"63b3e104-e1f3-462d-98b7-4ab4f679b619\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.196662 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7wjdc\" (UniqueName: \"kubernetes.io/projected/09aef551-5a7b-422f-bcc8-643001c12655-kube-api-access-7wjdc\") pod \"09aef551-5a7b-422f-bcc8-643001c12655\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.196695 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/09aef551-5a7b-422f-bcc8-643001c12655-httpd-run\") pod \"09aef551-5a7b-422f-bcc8-643001c12655\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.196804 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-scripts\") pod \"09aef551-5a7b-422f-bcc8-643001c12655\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.196860 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-combined-ca-bundle\") pod \"09aef551-5a7b-422f-bcc8-643001c12655\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.196895 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-dns-swift-storage-0\") pod \"63b3e104-e1f3-462d-98b7-4ab4f679b619\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.196929 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09aef551-5a7b-422f-bcc8-643001c12655-logs\") pod \"09aef551-5a7b-422f-bcc8-643001c12655\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.196990 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-config-data\") pod \"09aef551-5a7b-422f-bcc8-643001c12655\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.197022 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-config\") pod \"63b3e104-e1f3-462d-98b7-4ab4f679b619\" (UID: \"63b3e104-e1f3-462d-98b7-4ab4f679b619\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.198038 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09aef551-5a7b-422f-bcc8-643001c12655-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "09aef551-5a7b-422f-bcc8-643001c12655" (UID: "09aef551-5a7b-422f-bcc8-643001c12655"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.201874 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09aef551-5a7b-422f-bcc8-643001c12655-logs" (OuterVolumeSpecName: "logs") pod "09aef551-5a7b-422f-bcc8-643001c12655" (UID: "09aef551-5a7b-422f-bcc8-643001c12655"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.208441 4877 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/09aef551-5a7b-422f-bcc8-643001c12655-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.208502 4877 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09aef551-5a7b-422f-bcc8-643001c12655-logs\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.237019 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-scripts" (OuterVolumeSpecName: "scripts") pod "09aef551-5a7b-422f-bcc8-643001c12655" (UID: "09aef551-5a7b-422f-bcc8-643001c12655"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.247428 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09aef551-5a7b-422f-bcc8-643001c12655-kube-api-access-7wjdc" (OuterVolumeSpecName: "kube-api-access-7wjdc") pod "09aef551-5a7b-422f-bcc8-643001c12655" (UID: "09aef551-5a7b-422f-bcc8-643001c12655"). InnerVolumeSpecName "kube-api-access-7wjdc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.280517 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63b3e104-e1f3-462d-98b7-4ab4f679b619-kube-api-access-bzxkv" (OuterVolumeSpecName: "kube-api-access-bzxkv") pod "63b3e104-e1f3-462d-98b7-4ab4f679b619" (UID: "63b3e104-e1f3-462d-98b7-4ab4f679b619"). InnerVolumeSpecName "kube-api-access-bzxkv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.280737 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409" (OuterVolumeSpecName: "glance") pod "09aef551-5a7b-422f-bcc8-643001c12655" (UID: "09aef551-5a7b-422f-bcc8-643001c12655"). InnerVolumeSpecName "pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.303806 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.303817 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-ktcdh" event={"ID":"63b3e104-e1f3-462d-98b7-4ab4f679b619","Type":"ContainerDied","Data":"10fe46e31ac42442e11a49ed25dec00f1abb1a19605c2ae561e2ca0ccc59dd10"}
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.308155 4877 scope.go:117] "RemoveContainer" containerID="0bbd9f95078ca6f1c48588dcaa6f31a46d7683d350f5f9c90273b0e7e94db9dd"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.310574 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"09aef551-5a7b-422f-bcc8-643001c12655","Type":"ContainerDied","Data":"7522b252bf57c8296f440595822e271d56a825e057b411c81b1ca8574314f2a4"}
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.310657 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.315665 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="de95a2d4-539f-46f1-abcc-fe8e46e404ee" containerName="sg-core" containerID="cri-o://a1ce63d58c81cf657fad7b110f07c592e07651cccafff64066f7596f77886d33" gracePeriod=30
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.316030 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.316292 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d4a2b524-6312-4ae9-9970-bc4865c5de49","Type":"ContainerDied","Data":"5e1440e6c9c49c10021f2d66cc11b8595fd04c9fa3325b42f02594c61dd076bd"}
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.331260 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6l8q\" (UniqueName: \"kubernetes.io/projected/d4a2b524-6312-4ae9-9970-bc4865c5de49-kube-api-access-q6l8q\") pod \"d4a2b524-6312-4ae9-9970-bc4865c5de49\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.331487 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") pod \"d4a2b524-6312-4ae9-9970-bc4865c5de49\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.331673 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-combined-ca-bundle\") pod \"d4a2b524-6312-4ae9-9970-bc4865c5de49\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.331697 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-scripts\") pod \"d4a2b524-6312-4ae9-9970-bc4865c5de49\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.331766 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d4a2b524-6312-4ae9-9970-bc4865c5de49-httpd-run\") pod \"d4a2b524-6312-4ae9-9970-bc4865c5de49\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.331922 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4a2b524-6312-4ae9-9970-bc4865c5de49-logs\") pod \"d4a2b524-6312-4ae9-9970-bc4865c5de49\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.331961 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-config-data\") pod \"d4a2b524-6312-4ae9-9970-bc4865c5de49\" (UID: \"d4a2b524-6312-4ae9-9970-bc4865c5de49\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.332961 4877 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") on node \"crc\" "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.332985 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bzxkv\" (UniqueName: \"kubernetes.io/projected/63b3e104-e1f3-462d-98b7-4ab4f679b619-kube-api-access-bzxkv\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.332998 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7wjdc\" (UniqueName: \"kubernetes.io/projected/09aef551-5a7b-422f-bcc8-643001c12655-kube-api-access-7wjdc\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.333009 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.335267 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4a2b524-6312-4ae9-9970-bc4865c5de49-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "d4a2b524-6312-4ae9-9970-bc4865c5de49" (UID: "d4a2b524-6312-4ae9-9970-bc4865c5de49"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.336164 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4a2b524-6312-4ae9-9970-bc4865c5de49-logs" (OuterVolumeSpecName: "logs") pod "d4a2b524-6312-4ae9-9970-bc4865c5de49" (UID: "d4a2b524-6312-4ae9-9970-bc4865c5de49"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.398983 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-scripts" (OuterVolumeSpecName: "scripts") pod "d4a2b524-6312-4ae9-9970-bc4865c5de49" (UID: "d4a2b524-6312-4ae9-9970-bc4865c5de49"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.399072 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4a2b524-6312-4ae9-9970-bc4865c5de49-kube-api-access-q6l8q" (OuterVolumeSpecName: "kube-api-access-q6l8q") pod "d4a2b524-6312-4ae9-9970-bc4865c5de49" (UID: "d4a2b524-6312-4ae9-9970-bc4865c5de49"). InnerVolumeSpecName "kube-api-access-q6l8q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.437274 4877 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d4a2b524-6312-4ae9-9970-bc4865c5de49-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.437302 4877 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4a2b524-6312-4ae9-9970-bc4865c5de49-logs\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.437314 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6l8q\" (UniqueName: \"kubernetes.io/projected/d4a2b524-6312-4ae9-9970-bc4865c5de49-kube-api-access-q6l8q\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.437326 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.440178 4877 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.440591 4877 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409") on node "crc"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.468598 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0" (OuterVolumeSpecName: "glance") pod "d4a2b524-6312-4ae9-9970-bc4865c5de49" (UID: "d4a2b524-6312-4ae9-9970-bc4865c5de49"). InnerVolumeSpecName "pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.479749 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "09aef551-5a7b-422f-bcc8-643001c12655" (UID: "09aef551-5a7b-422f-bcc8-643001c12655"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.510377 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d4a2b524-6312-4ae9-9970-bc4865c5de49" (UID: "d4a2b524-6312-4ae9-9970-bc4865c5de49"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.522257 4877 scope.go:117] "RemoveContainer" containerID="ce3612223051e607b69728d9f2ca0d61b8a64eead01bc6e7152d430627570f27"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.526549 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-config-data" (OuterVolumeSpecName: "config-data") pod "09aef551-5a7b-422f-bcc8-643001c12655" (UID: "09aef551-5a7b-422f-bcc8-643001c12655"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: W0128 17:01:09.561754 4877 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/09aef551-5a7b-422f-bcc8-643001c12655/volumes/kubernetes.io~secret/config-data
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.561789 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-config-data" (OuterVolumeSpecName: "config-data") pod "09aef551-5a7b-422f-bcc8-643001c12655" (UID: "09aef551-5a7b-422f-bcc8-643001c12655"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.562042 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-config-data\") pod \"09aef551-5a7b-422f-bcc8-643001c12655\" (UID: \"09aef551-5a7b-422f-bcc8-643001c12655\") "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.563198 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.564932 4877 reconciler_common.go:293] "Volume detached for volume \"pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.569462 4877 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") on node \"crc\" "
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.575950 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.589843 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09aef551-5a7b-422f-bcc8-643001c12655-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.588563 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "63b3e104-e1f3-462d-98b7-4ab4f679b619" (UID: "63b3e104-e1f3-462d-98b7-4ab4f679b619"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.590668 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "63b3e104-e1f3-462d-98b7-4ab4f679b619" (UID: "63b3e104-e1f3-462d-98b7-4ab4f679b619"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.591950 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "63b3e104-e1f3-462d-98b7-4ab4f679b619" (UID: "63b3e104-e1f3-462d-98b7-4ab4f679b619"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.616348 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-config-data" (OuterVolumeSpecName: "config-data") pod "d4a2b524-6312-4ae9-9970-bc4865c5de49" (UID: "d4a2b524-6312-4ae9-9970-bc4865c5de49"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.616398 4877 scope.go:117] "RemoveContainer" containerID="5ba065f00ddfb7e36708d90963c3b63296bb3bbf36bcb0489160cc962d59b1ca"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.616727 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "63b3e104-e1f3-462d-98b7-4ab4f679b619" (UID: "63b3e104-e1f3-462d-98b7-4ab4f679b619"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.660580 4877 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.660748 4877 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0") on node "crc"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.661067 4877 scope.go:117] "RemoveContainer" containerID="bdab1fa2229e7b13c887a1801b8b7c2e351be246fe974453360e248a5cd0304a"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.681230 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-config" (OuterVolumeSpecName: "config") pod "63b3e104-e1f3-462d-98b7-4ab4f679b619" (UID: "63b3e104-e1f3-462d-98b7-4ab4f679b619"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.693508 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-config\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.697678 4877 reconciler_common.go:293] "Volume detached for volume \"pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.697828 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.697890 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.697945 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.698007 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4a2b524-6312-4ae9-9970-bc4865c5de49-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.699534 4877 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/63b3e104-e1f3-462d-98b7-4ab4f679b619-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.715159 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.721763 4877 scope.go:117] "RemoveContainer" containerID="682f25a923da008daf547ad833353682e1c2deb587b8de5c951e6a174942fbac"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.744549 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.757246 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.808546 4877 scope.go:117] "RemoveContainer" containerID="d7cf7ce98aae3591d2f78feff522feffc40894ac2b964211bbfe73f277fe099b"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.843464 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.892163 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 28 17:01:09 crc kubenswrapper[4877]: E0128 17:01:09.899310 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63b3e104-e1f3-462d-98b7-4ab4f679b619" containerName="init"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.899452 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="63b3e104-e1f3-462d-98b7-4ab4f679b619" containerName="init"
Jan 28 17:01:09 crc kubenswrapper[4877]: E0128 17:01:09.899594 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63b3e104-e1f3-462d-98b7-4ab4f679b619" containerName="dnsmasq-dns"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.899655 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="63b3e104-e1f3-462d-98b7-4ab4f679b619" containerName="dnsmasq-dns"
Jan 28 17:01:09 crc kubenswrapper[4877]: E0128 17:01:09.899708 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09aef551-5a7b-422f-bcc8-643001c12655" containerName="glance-log"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.899756 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="09aef551-5a7b-422f-bcc8-643001c12655" containerName="glance-log"
Jan 28 17:01:09 crc kubenswrapper[4877]: E0128 17:01:09.899817 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4a2b524-6312-4ae9-9970-bc4865c5de49" containerName="glance-httpd"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.899873 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4a2b524-6312-4ae9-9970-bc4865c5de49" containerName="glance-httpd"
Jan 28 17:01:09 crc kubenswrapper[4877]: E0128 17:01:09.899933 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09aef551-5a7b-422f-bcc8-643001c12655" containerName="glance-httpd"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.899999 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="09aef551-5a7b-422f-bcc8-643001c12655" containerName="glance-httpd"
Jan 28 17:01:09 crc kubenswrapper[4877]: E0128 17:01:09.900081 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4a2b524-6312-4ae9-9970-bc4865c5de49" containerName="glance-log"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.900141 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4a2b524-6312-4ae9-9970-bc4865c5de49" containerName="glance-log"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.900559 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4a2b524-6312-4ae9-9970-bc4865c5de49" containerName="glance-httpd"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.900637 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="63b3e104-e1f3-462d-98b7-4ab4f679b619" containerName="dnsmasq-dns"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.900705 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4a2b524-6312-4ae9-9970-bc4865c5de49" containerName="glance-log"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.900765 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="09aef551-5a7b-422f-bcc8-643001c12655" containerName="glance-log"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.900875 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="09aef551-5a7b-422f-bcc8-643001c12655" containerName="glance-httpd"
Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.902244 4877 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.904709 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.905690 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-jptj6" Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.906060 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.906334 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.906973 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.929467 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.937562 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.955534 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.956242 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 28 17:01:09 crc kubenswrapper[4877]: I0128 17:01:09.975755 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.026301 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54a9533f-7d16-4422-ac68-5ff6e34ddf39-logs\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.026358 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.026431 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-logs\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.026466 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.026546 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vmg9\" 
(UniqueName: \"kubernetes.io/projected/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-kube-api-access-2vmg9\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.026606 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.026646 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-scripts\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.026681 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.026726 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/54a9533f-7d16-4422-ac68-5ff6e34ddf39-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.026771 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.026807 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.026842 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-config-data\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.026881 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc 
kubenswrapper[4877]: I0128 17:01:10.026918 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.026954 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.026998 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5hjm\" (UniqueName: \"kubernetes.io/projected/54a9533f-7d16-4422-ac68-5ff6e34ddf39-kube-api-access-j5hjm\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.035277 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vg4fw"] Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.058574 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29493661-94ck2"] Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.129585 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/54a9533f-7d16-4422-ac68-5ff6e34ddf39-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.129674 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.129725 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.129758 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-config-data\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.129800 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 
17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.129842 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.129881 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.129923 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5hjm\" (UniqueName: \"kubernetes.io/projected/54a9533f-7d16-4422-ac68-5ff6e34ddf39-kube-api-access-j5hjm\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.129963 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54a9533f-7d16-4422-ac68-5ff6e34ddf39-logs\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.129987 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.130051 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-logs\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.130087 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.130228 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vmg9\" (UniqueName: \"kubernetes.io/projected/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-kube-api-access-2vmg9\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.130287 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc 
kubenswrapper[4877]: I0128 17:01:10.130330 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-scripts\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.130364 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.131028 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/54a9533f-7d16-4422-ac68-5ff6e34ddf39-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.146722 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54a9533f-7d16-4422-ac68-5ff6e34ddf39-logs\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.146876 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.147361 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-logs\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.148127 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.148160 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/c970fd9b051e1e4708c5b978fbab2178e7872a320f8059867f5cd332a890e640/globalmount\"" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.149713 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.150991 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.151025 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/5992022521d9a8a870a78abda3cb3974fc8658c66623b3cbfa6fe8c84dc59df6/globalmount\"" pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.168291 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.170073 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.171988 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.172863 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vmg9\" (UniqueName: \"kubernetes.io/projected/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-kube-api-access-2vmg9\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.172915 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.203349 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5hjm\" (UniqueName: \"kubernetes.io/projected/54a9533f-7d16-4422-ac68-5ff6e34ddf39-kube-api-access-j5hjm\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.215155 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-config-data\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.216833 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-scripts\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.242171 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.288321 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") pod \"glance-default-internal-api-0\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.370236 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") pod \"glance-default-external-api-0\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.393691 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vg4fw" event={"ID":"fad3a099-fcee-4a91-9de7-c67834a9743c","Type":"ContainerStarted","Data":"3cca45254379da0873efbe516c75b852b51ca67a6ac343f523f4acdf68142309"} Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.403470 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29493661-94ck2" event={"ID":"ece13606-7152-4723-a482-b27d1cf022d5","Type":"ContainerStarted","Data":"6b0fd9395a7fd99c1788aa32a167f5cd0e07bd320144b2733611c4f892cea2b5"} Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.463300 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.464420 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.471549 4877 generic.go:334] "Generic (PLEG): container finished" podID="de95a2d4-539f-46f1-abcc-fe8e46e404ee" containerID="a1ce63d58c81cf657fad7b110f07c592e07651cccafff64066f7596f77886d33" exitCode=2 Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.471707 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de95a2d4-539f-46f1-abcc-fe8e46e404ee","Type":"ContainerDied","Data":"a1ce63d58c81cf657fad7b110f07c592e07651cccafff64066f7596f77886d33"} Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.479294 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.498432 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-kfq2d" event={"ID":"dcc64de1-66f2-48e6-969f-61aa68773678","Type":"ContainerStarted","Data":"6f8ccc3cfb7937a158f3c5e2724dae140de84091b694c21a2c9bb240940d4b6d"} Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.500576 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-94bdfbbc4-6n5dr"] Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.514988 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-766dd7f94c-557n6"] Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.540844 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-ktcdh"] Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.546652 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-ktcdh"] Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.561009 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de95a2d4-539f-46f1-abcc-fe8e46e404ee-log-httpd\") pod \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " Jan 28 17:01:10 crc kubenswrapper[4877]: W0128 17:01:10.561158 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podae4830b8_7964_4686_a784_e357a560ec78.slice/crio-e80d7ea3cda37f0194d0ca86cd98c318db77441922dd1a031591ebcb71872690 WatchSource:0}: Error finding container e80d7ea3cda37f0194d0ca86cd98c318db77441922dd1a031591ebcb71872690: Status 404 returned error can't find the container with id e80d7ea3cda37f0194d0ca86cd98c318db77441922dd1a031591ebcb71872690 Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.561241 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-combined-ca-bundle\") pod \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.563090 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de95a2d4-539f-46f1-abcc-fe8e46e404ee-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "de95a2d4-539f-46f1-abcc-fe8e46e404ee" (UID: "de95a2d4-539f-46f1-abcc-fe8e46e404ee"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.563360 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-sg-core-conf-yaml\") pod \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.563419 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de95a2d4-539f-46f1-abcc-fe8e46e404ee-run-httpd\") pod \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.563470 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-config-data\") pod \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.563525 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrvjt\" (UniqueName: \"kubernetes.io/projected/de95a2d4-539f-46f1-abcc-fe8e46e404ee-kube-api-access-nrvjt\") pod \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.563749 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-scripts\") pod \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\" (UID: \"de95a2d4-539f-46f1-abcc-fe8e46e404ee\") " Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.565193 4877 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de95a2d4-539f-46f1-abcc-fe8e46e404ee-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.566392 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de95a2d4-539f-46f1-abcc-fe8e46e404ee-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "de95a2d4-539f-46f1-abcc-fe8e46e404ee" (UID: "de95a2d4-539f-46f1-abcc-fe8e46e404ee"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.582558 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-config-data" (OuterVolumeSpecName: "config-data") pod "de95a2d4-539f-46f1-abcc-fe8e46e404ee" (UID: "de95a2d4-539f-46f1-abcc-fe8e46e404ee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.606108 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de95a2d4-539f-46f1-abcc-fe8e46e404ee-kube-api-access-nrvjt" (OuterVolumeSpecName: "kube-api-access-nrvjt") pod "de95a2d4-539f-46f1-abcc-fe8e46e404ee" (UID: "de95a2d4-539f-46f1-abcc-fe8e46e404ee"). InnerVolumeSpecName "kube-api-access-nrvjt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.607953 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "de95a2d4-539f-46f1-abcc-fe8e46e404ee" (UID: "de95a2d4-539f-46f1-abcc-fe8e46e404ee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.611985 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-scripts" (OuterVolumeSpecName: "scripts") pod "de95a2d4-539f-46f1-abcc-fe8e46e404ee" (UID: "de95a2d4-539f-46f1-abcc-fe8e46e404ee"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.634106 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-kfq2d" podStartSLOduration=4.285415913 podStartE2EDuration="1m20.634059061s" podCreationTimestamp="2026-01-28 16:59:50 +0000 UTC" firstStartedPulling="2026-01-28 16:59:52.638745425 +0000 UTC m=+1496.197072313" lastFinishedPulling="2026-01-28 17:01:08.987388573 +0000 UTC m=+1572.545715461" observedRunningTime="2026-01-28 17:01:10.57592646 +0000 UTC m=+1574.134253358" watchObservedRunningTime="2026-01-28 17:01:10.634059061 +0000 UTC m=+1574.192385949" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.648786 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "de95a2d4-539f-46f1-abcc-fe8e46e404ee" (UID: "de95a2d4-539f-46f1-abcc-fe8e46e404ee"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.672192 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.672219 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.672229 4877 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.672237 4877 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de95a2d4-539f-46f1-abcc-fe8e46e404ee-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.672245 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de95a2d4-539f-46f1-abcc-fe8e46e404ee-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.672252 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrvjt\" (UniqueName: \"kubernetes.io/projected/de95a2d4-539f-46f1-abcc-fe8e46e404ee-kube-api-access-nrvjt\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:10 crc kubenswrapper[4877]: W0128 17:01:10.896389 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda24e6378_357f_4491_a769_e5e9cae6b549.slice/crio-c3fd0a2008e74a2be6ff1d0cfef19481861aeb0e3e426b0e281b981466041036 WatchSource:0}: Error finding container c3fd0a2008e74a2be6ff1d0cfef19481861aeb0e3e426b0e281b981466041036: Status 404 returned error can't find the container with id c3fd0a2008e74a2be6ff1d0cfef19481861aeb0e3e426b0e281b981466041036 Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.924700 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7984dc8994-gssff"] Jan 28 17:01:10 crc kubenswrapper[4877]: I0128 17:01:10.982290 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59d5ff467f-r26hn"] Jan 28 17:01:11 crc kubenswrapper[4877]: W0128 17:01:11.282311 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54a9533f_7d16_4422_ac68_5ff6e34ddf39.slice/crio-526207bef1f52df9f9a84a9f9fea1846c224f44e534a726c492f31933d830d3d WatchSource:0}: Error finding container 526207bef1f52df9f9a84a9f9fea1846c224f44e534a726c492f31933d830d3d: Status 404 returned error can't find the container with id 526207bef1f52df9f9a84a9f9fea1846c224f44e534a726c492f31933d830d3d Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.292075 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.363847 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09aef551-5a7b-422f-bcc8-643001c12655" path="/var/lib/kubelet/pods/09aef551-5a7b-422f-bcc8-643001c12655/volumes" Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.364703 4877 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63b3e104-e1f3-462d-98b7-4ab4f679b619" path="/var/lib/kubelet/pods/63b3e104-e1f3-462d-98b7-4ab4f679b619/volumes" Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.365369 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4a2b524-6312-4ae9-9970-bc4865c5de49" path="/var/lib/kubelet/pods/d4a2b524-6312-4ae9-9970-bc4865c5de49/volumes" Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.516878 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29493661-94ck2" event={"ID":"ece13606-7152-4723-a482-b27d1cf022d5","Type":"ContainerStarted","Data":"23213e1afd081109c0a38613de24cff87fc27912a80b98b2cd6b03a85c207866"} Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.520093 4877 generic.go:334] "Generic (PLEG): container finished" podID="0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5" containerID="91c27be8ad2a3cedce2f5a618a9a9bace58c38236fb7bf48451a18fba21e8ee1" exitCode=0 Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.520182 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-pbjv2" event={"ID":"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5","Type":"ContainerDied","Data":"91c27be8ad2a3cedce2f5a618a9a9bace58c38236fb7bf48451a18fba21e8ee1"} Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.524100 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de95a2d4-539f-46f1-abcc-fe8e46e404ee","Type":"ContainerDied","Data":"27a83b011a9986615aba5ed86b934f630cdfecf354d611105917ebb56af6c07e"} Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.524164 4877 scope.go:117] "RemoveContainer" containerID="a1ce63d58c81cf657fad7b110f07c592e07651cccafff64066f7596f77886d33" Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.524362 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.540395 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"54a9533f-7d16-4422-ac68-5ff6e34ddf39","Type":"ContainerStarted","Data":"526207bef1f52df9f9a84a9f9fea1846c224f44e534a726c492f31933d830d3d"} Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.543207 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.547792 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29493661-94ck2" podStartSLOduration=11.547768121 podStartE2EDuration="11.547768121s" podCreationTimestamp="2026-01-28 17:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:11.532876402 +0000 UTC m=+1575.091203290" watchObservedRunningTime="2026-01-28 17:01:11.547768121 +0000 UTC m=+1575.106095009" Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.549746 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-766dd7f94c-557n6" event={"ID":"87ac4c7d-c58b-4930-a244-3d49c02af801","Type":"ContainerStarted","Data":"f952c0fb66be5c806b1b82e7613ced8aef62b7966c85fbadf2d3ca70bbd117d2"} Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.560870 4877 generic.go:334] "Generic (PLEG): container finished" podID="fad3a099-fcee-4a91-9de7-c67834a9743c" containerID="23d7cf3fea1a43cd0753c3b17caf36b466ab80a868f25dab496392167bc4b917" exitCode=0 Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.561383 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vg4fw" event={"ID":"fad3a099-fcee-4a91-9de7-c67834a9743c","Type":"ContainerDied","Data":"23d7cf3fea1a43cd0753c3b17caf36b466ab80a868f25dab496392167bc4b917"} Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.574572 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-94bdfbbc4-6n5dr" event={"ID":"ae4830b8-7964-4686-a784-e357a560ec78","Type":"ContainerStarted","Data":"a0c8bae53352a39a8f8d3191159495884656ef26e377b4e4f8d35f5342fad204"} Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.574622 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-94bdfbbc4-6n5dr" event={"ID":"ae4830b8-7964-4686-a784-e357a560ec78","Type":"ContainerStarted","Data":"840c9aa34a4c1f823251d2f2c2a053f2400e1b991e11f61f3eebbb199297baa9"} Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.574633 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-94bdfbbc4-6n5dr" event={"ID":"ae4830b8-7964-4686-a784-e357a560ec78","Type":"ContainerStarted","Data":"e80d7ea3cda37f0194d0ca86cd98c318db77441922dd1a031591ebcb71872690"} Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.575191 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-94bdfbbc4-6n5dr" Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.575534 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-94bdfbbc4-6n5dr" Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.581733 4877 generic.go:334] "Generic (PLEG): container finished" podID="aaa11b8e-bf0e-4b41-9db5-37eb4389e22a" containerID="bb6a8d1a93768af9792ca9fc0a4274d4ad82276e9fa81bc3dbaaf6dea8760ac3" 
exitCode=0 Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.581836 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d5ff467f-r26hn" event={"ID":"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a","Type":"ContainerDied","Data":"bb6a8d1a93768af9792ca9fc0a4274d4ad82276e9fa81bc3dbaaf6dea8760ac3"} Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.581873 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d5ff467f-r26hn" event={"ID":"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a","Type":"ContainerStarted","Data":"903b52c16fdafe274c4bc8eff90a1654df7e7c9de5d12a953aa3e84c2bb3eb48"} Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.601006 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7984dc8994-gssff" event={"ID":"a24e6378-357f-4491-a769-e5e9cae6b549","Type":"ContainerStarted","Data":"c3fd0a2008e74a2be6ff1d0cfef19481861aeb0e3e426b0e281b981466041036"} Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.633326 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.646447 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.672563 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:01:11 crc kubenswrapper[4877]: E0128 17:01:11.673396 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de95a2d4-539f-46f1-abcc-fe8e46e404ee" containerName="sg-core" Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.673415 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="de95a2d4-539f-46f1-abcc-fe8e46e404ee" containerName="sg-core" Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.673748 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="de95a2d4-539f-46f1-abcc-fe8e46e404ee" containerName="sg-core" Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.681783 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.686980 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.687186 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.689226 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-94bdfbbc4-6n5dr" podStartSLOduration=4.689198768 podStartE2EDuration="4.689198768s" podCreationTimestamp="2026-01-28 17:01:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:11.624156222 +0000 UTC m=+1575.182483110" watchObservedRunningTime="2026-01-28 17:01:11.689198768 +0000 UTC m=+1575.247525656"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.726903 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.731432 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhp8f\" (UniqueName: \"kubernetes.io/projected/747d3fca-be49-419c-b42f-e746edee5eda-kube-api-access-mhp8f\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.731525 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.731589 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.731660 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-scripts\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.731688 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-config-data\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.731723 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/747d3fca-be49-419c-b42f-e746edee5eda-run-httpd\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.731757 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/747d3fca-be49-419c-b42f-e746edee5eda-log-httpd\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.834986 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhp8f\" (UniqueName: \"kubernetes.io/projected/747d3fca-be49-419c-b42f-e746edee5eda-kube-api-access-mhp8f\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.835054 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.835081 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.835130 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-scripts\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.835153 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-config-data\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.835176 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/747d3fca-be49-419c-b42f-e746edee5eda-run-httpd\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.835202 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/747d3fca-be49-419c-b42f-e746edee5eda-log-httpd\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.835787 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/747d3fca-be49-419c-b42f-e746edee5eda-log-httpd\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.836041 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/747d3fca-be49-419c-b42f-e746edee5eda-run-httpd\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.848382 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-config-data\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.850278 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.857545 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhp8f\" (UniqueName: \"kubernetes.io/projected/747d3fca-be49-419c-b42f-e746edee5eda-kube-api-access-mhp8f\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.863275 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.892496 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-scripts\") pod \"ceilometer-0\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") " pod="openstack/ceilometer-0"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.932820 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7559f7bb56-qgpxn"]
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.935811 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.939074 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.939287 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc"
Jan 28 17:01:11 crc kubenswrapper[4877]: I0128 17:01:11.979080 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7559f7bb56-qgpxn"]
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.029010 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.042513 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8r85p\" (UniqueName: \"kubernetes.io/projected/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-kube-api-access-8r85p\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.042563 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-config-data\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.042661 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-config-data-custom\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.042735 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-public-tls-certs\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.042837 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-logs\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.042884 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-internal-tls-certs\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.042925 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-combined-ca-bundle\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.145321 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-combined-ca-bundle\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.145399 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8r85p\" (UniqueName: \"kubernetes.io/projected/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-kube-api-access-8r85p\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.145420 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-config-data\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.145497 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-config-data-custom\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.145549 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-public-tls-certs\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.145617 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-logs\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.145646 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-internal-tls-certs\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.146089 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-logs\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.154261 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-public-tls-certs\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.154542 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-combined-ca-bundle\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.156215 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-internal-tls-certs\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.169129 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-config-data-custom\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.170045 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8r85p\" (UniqueName: \"kubernetes.io/projected/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-kube-api-access-8r85p\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.170402 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9e6b9b4-d0da-4d73-913a-6e03bbb512c8-config-data\") pod \"barbican-api-7559f7bb56-qgpxn\" (UID: \"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8\") " pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.363744 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.618622 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d5ff467f-r26hn" event={"ID":"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a","Type":"ContainerStarted","Data":"c8dc1c60fe66e5f67c149fda9f6738fd71fdc01508f997eb3bdda1a70f764bf9"}
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.619167 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.635331 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7","Type":"ContainerStarted","Data":"2c34a214086dc05da67ff2aa1a4ccb116b31f3ce56731382affa08860fa0888a"}
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.638186 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"54a9533f-7d16-4422-ac68-5ff6e34ddf39","Type":"ContainerStarted","Data":"f42cee324b23a633e0a4278f7eafd8b837b1d3c804e14fb895f80a2336cd45b6"}
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.681164 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-59d5ff467f-r26hn" podStartSLOduration=5.681137949 podStartE2EDuration="5.681137949s" podCreationTimestamp="2026-01-28 17:01:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:12.660582927 +0000 UTC m=+1576.218909845" watchObservedRunningTime="2026-01-28 17:01:12.681137949 +0000 UTC m=+1576.239464837"
Jan 28 17:01:12 crc kubenswrapper[4877]: W0128 17:01:12.837867 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod747d3fca_be49_419c_b42f_e746edee5eda.slice/crio-bd6de29d42b2bdd02e0dbb00c2e90f2c51d6a842ceeb10cc62231cda83e64322 WatchSource:0}: Error finding container bd6de29d42b2bdd02e0dbb00c2e90f2c51d6a842ceeb10cc62231cda83e64322: Status 404 returned error can't find the container with id bd6de29d42b2bdd02e0dbb00c2e90f2c51d6a842ceeb10cc62231cda83e64322
Jan 28 17:01:12 crc kubenswrapper[4877]: I0128 17:01:12.843794 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.214893 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7559f7bb56-qgpxn"]
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.351782 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de95a2d4-539f-46f1-abcc-fe8e46e404ee" path="/var/lib/kubelet/pods/de95a2d4-539f-46f1-abcc-fe8e46e404ee/volumes"
Jan 28 17:01:13 crc kubenswrapper[4877]: W0128 17:01:13.455836 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9e6b9b4_d0da_4d73_913a_6e03bbb512c8.slice/crio-b99184826ff60518484bcc9ecd8d6698812ca7c410f2f3dd557adb393ae50b26 WatchSource:0}: Error finding container b99184826ff60518484bcc9ecd8d6698812ca7c410f2f3dd557adb393ae50b26: Status 404 returned error can't find the container with id b99184826ff60518484bcc9ecd8d6698812ca7c410f2f3dd557adb393ae50b26
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.613598 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-pbjv2"
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.653881 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7","Type":"ContainerStarted","Data":"bb72620583d5ff8ef94f423a3e46172d9d8580295b24de01813a26858b551360"}
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.656611 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7559f7bb56-qgpxn" event={"ID":"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8","Type":"ContainerStarted","Data":"b99184826ff60518484bcc9ecd8d6698812ca7c410f2f3dd557adb393ae50b26"}
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.658747 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"747d3fca-be49-419c-b42f-e746edee5eda","Type":"ContainerStarted","Data":"bd6de29d42b2bdd02e0dbb00c2e90f2c51d6a842ceeb10cc62231cda83e64322"}
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.670074 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-pbjv2" event={"ID":"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5","Type":"ContainerDied","Data":"6963953e61b4e6fecdf05ec777c33a1016c586ddcbb9c42e89f3ae8bb5934532"}
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.670185 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6963953e61b4e6fecdf05ec777c33a1016c586ddcbb9c42e89f3ae8bb5934532"
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.670137 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-pbjv2"
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.685630 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"54a9533f-7d16-4422-ac68-5ff6e34ddf39","Type":"ContainerStarted","Data":"fbd861627eeb60a09876529ea4a64e3b217414fc001ef4e87aa4dc127ce7425c"}
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.718018 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8plmh\" (UniqueName: \"kubernetes.io/projected/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-kube-api-access-8plmh\") pod \"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5\" (UID: \"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5\") "
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.718265 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-config-data\") pod \"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5\" (UID: \"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5\") "
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.718683 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-combined-ca-bundle\") pod \"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5\" (UID: \"0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5\") "
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.746098 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.74607616 podStartE2EDuration="4.74607616s" podCreationTimestamp="2026-01-28 17:01:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:13.727857971 +0000 UTC m=+1577.286184849" watchObservedRunningTime="2026-01-28 17:01:13.74607616 +0000 UTC m=+1577.304403048"
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.773218 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-kube-api-access-8plmh" (OuterVolumeSpecName: "kube-api-access-8plmh") pod "0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5" (UID: "0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5"). InnerVolumeSpecName "kube-api-access-8plmh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.782758 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5" (UID: "0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.825598 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.825637 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8plmh\" (UniqueName: \"kubernetes.io/projected/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-kube-api-access-8plmh\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.846767 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-config-data" (OuterVolumeSpecName: "config-data") pod "0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5" (UID: "0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:13 crc kubenswrapper[4877]: I0128 17:01:13.927864 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:14 crc kubenswrapper[4877]: I0128 17:01:14.720310 4877 generic.go:334] "Generic (PLEG): container finished" podID="6becdddb-915e-40e0-ba03-9de124ad56c7" containerID="d67867eb4e420db2210dc7a867be431ead9a5ee36c8f4aeb0e89367ae7b38663" exitCode=0
Jan 28 17:01:14 crc kubenswrapper[4877]: I0128 17:01:14.720426 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-957zv" event={"ID":"6becdddb-915e-40e0-ba03-9de124ad56c7","Type":"ContainerDied","Data":"d67867eb4e420db2210dc7a867be431ead9a5ee36c8f4aeb0e89367ae7b38663"}
Jan 28 17:01:15 crc kubenswrapper[4877]: I0128 17:01:15.738773 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"747d3fca-be49-419c-b42f-e746edee5eda","Type":"ContainerStarted","Data":"5a80b0963b889c7583031bd7eabe06771d3302929136286d0e3ffbd8e46de522"}
Jan 28 17:01:15 crc kubenswrapper[4877]: I0128 17:01:15.743736 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-766dd7f94c-557n6" event={"ID":"87ac4c7d-c58b-4930-a244-3d49c02af801","Type":"ContainerStarted","Data":"3e7d28ad9d4bd85d87d829f2185dda8b21688c262a8463f9e4d1836f3c4ea919"}
Jan 28 17:01:15 crc kubenswrapper[4877]: I0128 17:01:15.743774 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-766dd7f94c-557n6" event={"ID":"87ac4c7d-c58b-4930-a244-3d49c02af801","Type":"ContainerStarted","Data":"6a0189d1a573a8fad984d5538acf8277860fd3a96be07aa78987e83e8028d58f"}
Jan 28 17:01:15 crc kubenswrapper[4877]: I0128 17:01:15.749096 4877 generic.go:334] "Generic (PLEG): container finished" podID="dcc64de1-66f2-48e6-969f-61aa68773678" containerID="6f8ccc3cfb7937a158f3c5e2724dae140de84091b694c21a2c9bb240940d4b6d" exitCode=0
Jan 28 17:01:15 crc kubenswrapper[4877]: I0128 17:01:15.749278 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-kfq2d" event={"ID":"dcc64de1-66f2-48e6-969f-61aa68773678","Type":"ContainerDied","Data":"6f8ccc3cfb7937a158f3c5e2724dae140de84091b694c21a2c9bb240940d4b6d"}
Jan 28 17:01:15 crc kubenswrapper[4877]: I0128 17:01:15.756793 4877 generic.go:334] "Generic (PLEG): container finished" podID="fad3a099-fcee-4a91-9de7-c67834a9743c" containerID="813c41ef5472727efe970f568129f444d72ff37973fc90f645af5ae37011da9a" exitCode=0
Jan 28 17:01:15 crc kubenswrapper[4877]: I0128 17:01:15.756876 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vg4fw" event={"ID":"fad3a099-fcee-4a91-9de7-c67834a9743c","Type":"ContainerDied","Data":"813c41ef5472727efe970f568129f444d72ff37973fc90f645af5ae37011da9a"}
Jan 28 17:01:15 crc kubenswrapper[4877]: I0128 17:01:15.792356 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7","Type":"ContainerStarted","Data":"d4993471e4e07184c25eddffffbc1a8d80e15615dfb77573261798207c61721e"}
Jan 28 17:01:15 crc kubenswrapper[4877]: I0128 17:01:15.802789 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7559f7bb56-qgpxn" event={"ID":"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8","Type":"ContainerStarted","Data":"868f2027dab9c301c7b283fa830dc4763d9596611b3a990400c3c23a70126d33"}
Jan 28 17:01:15 crc kubenswrapper[4877]: I0128 17:01:15.816951 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-766dd7f94c-557n6" podStartSLOduration=4.956317209 podStartE2EDuration="8.816934008s" podCreationTimestamp="2026-01-28 17:01:07 +0000 UTC" firstStartedPulling="2026-01-28 17:01:10.746751576 +0000 UTC m=+1574.305078464" lastFinishedPulling="2026-01-28 17:01:14.607368375 +0000 UTC m=+1578.165695263" observedRunningTime="2026-01-28 17:01:15.781707512 +0000 UTC m=+1579.340034400" watchObservedRunningTime="2026-01-28 17:01:15.816934008 +0000 UTC m=+1579.375260896"
Jan 28 17:01:15 crc kubenswrapper[4877]: I0128 17:01:15.823709 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7984dc8994-gssff" event={"ID":"a24e6378-357f-4491-a769-e5e9cae6b549","Type":"ContainerStarted","Data":"f2e2c525df14514da5ea52473b33c3cc145909217f09aabd404e765a1a0bb890"}
Jan 28 17:01:15 crc kubenswrapper[4877]: I0128 17:01:15.824083 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7984dc8994-gssff" event={"ID":"a24e6378-357f-4491-a769-e5e9cae6b549","Type":"ContainerStarted","Data":"4cd699e4575461d1169ba0cb4636ecbcc731e3f2c118ba7b4419010af14b6f0e"}
Jan 28 17:01:15 crc kubenswrapper[4877]: I0128 17:01:15.940520 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.940472645 podStartE2EDuration="6.940472645s" podCreationTimestamp="2026-01-28 17:01:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:15.880276499 +0000 UTC m=+1579.438603387" watchObservedRunningTime="2026-01-28 17:01:15.940472645 +0000 UTC m=+1579.498799533"
Jan 28 17:01:15 crc kubenswrapper[4877]: I0128 17:01:15.968151 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-7984dc8994-gssff" podStartSLOduration=5.111034103 podStartE2EDuration="8.968129027s" podCreationTimestamp="2026-01-28 17:01:07 +0000 UTC" firstStartedPulling="2026-01-28 17:01:10.925746122 +0000 UTC m=+1574.484073020" lastFinishedPulling="2026-01-28 17:01:14.782841056 +0000 UTC m=+1578.341167944" observedRunningTime="2026-01-28 17:01:15.907200772 +0000 UTC m=+1579.465527660" watchObservedRunningTime="2026-01-28 17:01:15.968129027 +0000 UTC m=+1579.526455915"
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.583426 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-957zv"
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.724345 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-db-sync-config-data\") pod \"6becdddb-915e-40e0-ba03-9de124ad56c7\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") "
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.724978 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vnj2\" (UniqueName: \"kubernetes.io/projected/6becdddb-915e-40e0-ba03-9de124ad56c7-kube-api-access-8vnj2\") pod \"6becdddb-915e-40e0-ba03-9de124ad56c7\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") "
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.725039 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-config-data\") pod \"6becdddb-915e-40e0-ba03-9de124ad56c7\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") "
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.725090 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-combined-ca-bundle\") pod \"6becdddb-915e-40e0-ba03-9de124ad56c7\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") "
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.725577 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6becdddb-915e-40e0-ba03-9de124ad56c7-etc-machine-id\") pod \"6becdddb-915e-40e0-ba03-9de124ad56c7\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") "
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.725653 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-scripts\") pod \"6becdddb-915e-40e0-ba03-9de124ad56c7\" (UID: \"6becdddb-915e-40e0-ba03-9de124ad56c7\") "
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.725824 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6becdddb-915e-40e0-ba03-9de124ad56c7-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "6becdddb-915e-40e0-ba03-9de124ad56c7" (UID: "6becdddb-915e-40e0-ba03-9de124ad56c7"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.726561 4877 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6becdddb-915e-40e0-ba03-9de124ad56c7-etc-machine-id\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.735169 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-scripts" (OuterVolumeSpecName: "scripts") pod "6becdddb-915e-40e0-ba03-9de124ad56c7" (UID: "6becdddb-915e-40e0-ba03-9de124ad56c7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.735463 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6becdddb-915e-40e0-ba03-9de124ad56c7-kube-api-access-8vnj2" (OuterVolumeSpecName: "kube-api-access-8vnj2") pod "6becdddb-915e-40e0-ba03-9de124ad56c7" (UID: "6becdddb-915e-40e0-ba03-9de124ad56c7"). InnerVolumeSpecName "kube-api-access-8vnj2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.735589 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "6becdddb-915e-40e0-ba03-9de124ad56c7" (UID: "6becdddb-915e-40e0-ba03-9de124ad56c7"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.769428 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6becdddb-915e-40e0-ba03-9de124ad56c7" (UID: "6becdddb-915e-40e0-ba03-9de124ad56c7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.818120 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-config-data" (OuterVolumeSpecName: "config-data") pod "6becdddb-915e-40e0-ba03-9de124ad56c7" (UID: "6becdddb-915e-40e0-ba03-9de124ad56c7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.828139 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.828375 4877 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.828439 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vnj2\" (UniqueName: \"kubernetes.io/projected/6becdddb-915e-40e0-ba03-9de124ad56c7-kube-api-access-8vnj2\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.828564 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.828639 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6becdddb-915e-40e0-ba03-9de124ad56c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.853983 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-957zv"
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.853977 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-957zv" event={"ID":"6becdddb-915e-40e0-ba03-9de124ad56c7","Type":"ContainerDied","Data":"c213ca938bafe85b2cabd0df83842e2c305a65dc89e093bb8768dedd3b93d6c1"}
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.854154 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c213ca938bafe85b2cabd0df83842e2c305a65dc89e093bb8768dedd3b93d6c1"
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.858141 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7559f7bb56-qgpxn" event={"ID":"d9e6b9b4-d0da-4d73-913a-6e03bbb512c8","Type":"ContainerStarted","Data":"dfc58f9d1446b5df1475d907dc7507c6be5dbf1887892af66f583423d2a6b375"}
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.859642 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.859683 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7559f7bb56-qgpxn"
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.867134 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"747d3fca-be49-419c-b42f-e746edee5eda","Type":"ContainerStarted","Data":"a253b8d257bbecd20dab1453c8b4b425474c70e564c97034f52f7ed912c9aadf"}
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.869648 4877 generic.go:334] "Generic (PLEG): container finished" podID="ece13606-7152-4723-a482-b27d1cf022d5" containerID="23213e1afd081109c0a38613de24cff87fc27912a80b98b2cd6b03a85c207866" exitCode=0
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.870076 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29493661-94ck2" event={"ID":"ece13606-7152-4723-a482-b27d1cf022d5","Type":"ContainerDied","Data":"23213e1afd081109c0a38613de24cff87fc27912a80b98b2cd6b03a85c207866"}
Jan 28 17:01:16 crc kubenswrapper[4877]: I0128 17:01:16.887546 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7559f7bb56-qgpxn" podStartSLOduration=5.88752803 podStartE2EDuration="5.88752803s" podCreationTimestamp="2026-01-28 17:01:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:16.884946281 +0000 UTC m=+1580.443273189" watchObservedRunningTime="2026-01-28 17:01:16.88752803 +0000 UTC m=+1580.445854918"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.159513 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59d5ff467f-r26hn"]
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.159783 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-59d5ff467f-r26hn" podUID="aaa11b8e-bf0e-4b41-9db5-37eb4389e22a" containerName="dnsmasq-dns" containerID="cri-o://c8dc1c60fe66e5f67c149fda9f6738fd71fdc01508f997eb3bdda1a70f764bf9" gracePeriod=10
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.176996 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 28 17:01:17 crc kubenswrapper[4877]: E0128 17:01:17.177468 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6becdddb-915e-40e0-ba03-9de124ad56c7" containerName="cinder-db-sync"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.177504 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="6becdddb-915e-40e0-ba03-9de124ad56c7" containerName="cinder-db-sync"
Jan 28 17:01:17 crc kubenswrapper[4877]: E0128 17:01:17.177537 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5" containerName="heat-db-sync"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.177544 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5" containerName="heat-db-sync"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.177743 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5" containerName="heat-db-sync"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.177774 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="6becdddb-915e-40e0-ba03-9de124ad56c7" containerName="cinder-db-sync"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.179329 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.186962 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.187243 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-p2r4c"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.187519 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.197378 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.209675 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-59d5ff467f-r26hn"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.238874 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.238932 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.238953 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b043f173-17ee-445b-a64b-ac750304a5ff-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.238998 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtnq9\" (UniqueName: \"kubernetes.io/projected/b043f173-17ee-445b-a64b-ac750304a5ff-kube-api-access-xtnq9\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.239026 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-config-data\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.239168 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-scripts\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.269229 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.344208 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.344553 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.344609 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b043f173-17ee-445b-a64b-ac750304a5ff-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.344725 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtnq9\" (UniqueName: \"kubernetes.io/projected/b043f173-17ee-445b-a64b-ac750304a5ff-kube-api-access-xtnq9\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.347877 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-config-data\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.349597 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b043f173-17ee-445b-a64b-ac750304a5ff-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.354391 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.367812 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-scripts\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.382836 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-scripts\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.382969 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.390153 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-config-data\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.458358 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtnq9\" (UniqueName: \"kubernetes.io/projected/b043f173-17ee-445b-a64b-ac750304a5ff-kube-api-access-xtnq9\") pod \"cinder-scheduler-0\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.517578 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-69c986f6d7-bls2q"]
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.520063 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.546148 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69c986f6d7-bls2q"]
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.563940 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.575397 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnksq\" (UniqueName: \"kubernetes.io/projected/55414a13-6cc8-42cb-bb48-610740b92289-kube-api-access-fnksq\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.575528 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-ovsdbserver-nb\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.575652 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-ovsdbserver-sb\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.575699 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-dns-swift-storage-0\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.575779 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-dns-svc\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.575838 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-config\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.624547 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.626670 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.644046 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.661154 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.678074 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-config-data-custom\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.678142 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-ovsdbserver-nb\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.678226 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-scripts\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.678257 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-ovsdbserver-sb\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.678289 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-dns-swift-storage-0\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.678317 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1e203aba-b679-45b3-9987-8a63bdb556db-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.678358 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-dns-svc\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.678398 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-config\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.678419 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-config-data\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.678454 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.678523 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vp7d8\" (UniqueName: \"kubernetes.io/projected/1e203aba-b679-45b3-9987-8a63bdb556db-kube-api-access-vp7d8\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.678571 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnksq\" (UniqueName: \"kubernetes.io/projected/55414a13-6cc8-42cb-bb48-610740b92289-kube-api-access-fnksq\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.678600 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e203aba-b679-45b3-9987-8a63bdb556db-logs\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.679779 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-ovsdbserver-nb\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.680442 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-ovsdbserver-sb\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.722442 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-config\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.723367 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-dns-svc\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.725495 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-dns-swift-storage-0\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.756281 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnksq\" (UniqueName: \"kubernetes.io/projected/55414a13-6cc8-42cb-bb48-610740b92289-kube-api-access-fnksq\") pod \"dnsmasq-dns-69c986f6d7-bls2q\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.767912 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-kfq2d"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.782212 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-scripts\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.782411 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1e203aba-b679-45b3-9987-8a63bdb556db-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.782502 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-config-data\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.782531 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.782581 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vp7d8\" (UniqueName: \"kubernetes.io/projected/1e203aba-b679-45b3-9987-8a63bdb556db-kube-api-access-vp7d8\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.782632 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e203aba-b679-45b3-9987-8a63bdb556db-logs\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.782670 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-config-data-custom\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.783764 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e203aba-b679-45b3-9987-8a63bdb556db-logs\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.786760 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1e203aba-b679-45b3-9987-8a63bdb556db-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.791276 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-config-data\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.799204 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-scripts\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.803667 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.830158 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-config-data-custom\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.840340 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vp7d8\" (UniqueName: \"kubernetes.io/projected/1e203aba-b679-45b3-9987-8a63bdb556db-kube-api-access-vp7d8\") pod \"cinder-api-0\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " pod="openstack/cinder-api-0"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.934860 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69c986f6d7-bls2q"
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.937470 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-config-data\") pod \"dcc64de1-66f2-48e6-969f-61aa68773678\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") "
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.937804 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-combined-ca-bundle\") pod \"dcc64de1-66f2-48e6-969f-61aa68773678\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") "
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.938523 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-scripts\") pod \"dcc64de1-66f2-48e6-969f-61aa68773678\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") "
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.938617 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dcc64de1-66f2-48e6-969f-61aa68773678-logs\") pod \"dcc64de1-66f2-48e6-969f-61aa68773678\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") "
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.939025 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbsl9\" (UniqueName: \"kubernetes.io/projected/dcc64de1-66f2-48e6-969f-61aa68773678-kube-api-access-kbsl9\") pod \"dcc64de1-66f2-48e6-969f-61aa68773678\" (UID: \"dcc64de1-66f2-48e6-969f-61aa68773678\") "
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.966427 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dcc64de1-66f2-48e6-969f-61aa68773678-logs" (OuterVolumeSpecName: "logs") pod "dcc64de1-66f2-48e6-969f-61aa68773678" (UID: "dcc64de1-66f2-48e6-969f-61aa68773678"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.970941 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcc64de1-66f2-48e6-969f-61aa68773678-kube-api-access-kbsl9" (OuterVolumeSpecName: "kube-api-access-kbsl9") pod "dcc64de1-66f2-48e6-969f-61aa68773678" (UID: "dcc64de1-66f2-48e6-969f-61aa68773678"). InnerVolumeSpecName "kube-api-access-kbsl9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:01:17 crc kubenswrapper[4877]: I0128 17:01:17.980653 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.050890 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-scripts" (OuterVolumeSpecName: "scripts") pod "dcc64de1-66f2-48e6-969f-61aa68773678" (UID: "dcc64de1-66f2-48e6-969f-61aa68773678"). InnerVolumeSpecName "scripts".
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.065973 4877 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dcc64de1-66f2-48e6-969f-61aa68773678-logs\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.074680 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-7b4cc7844-8vpdw"] Jan 28 17:01:18 crc kubenswrapper[4877]: E0128 17:01:18.076527 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcc64de1-66f2-48e6-969f-61aa68773678" containerName="placement-db-sync" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.076562 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcc64de1-66f2-48e6-969f-61aa68773678" containerName="placement-db-sync" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.077077 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcc64de1-66f2-48e6-969f-61aa68773678" containerName="placement-db-sync" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.079191 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbsl9\" (UniqueName: \"kubernetes.io/projected/dcc64de1-66f2-48e6-969f-61aa68773678-kube-api-access-kbsl9\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.080166 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.087169 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.087441 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.117049 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-kfq2d" event={"ID":"dcc64de1-66f2-48e6-969f-61aa68773678","Type":"ContainerDied","Data":"4b3097e26be6d79f6758d39c3b6a3628ee05a0a138f8a74cfdefece65734450e"} Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.117120 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b3097e26be6d79f6758d39c3b6a3628ee05a0a138f8a74cfdefece65734450e" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.117302 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-kfq2d" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.150914 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7b4cc7844-8vpdw"] Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.213088 4877 generic.go:334] "Generic (PLEG): container finished" podID="aaa11b8e-bf0e-4b41-9db5-37eb4389e22a" containerID="c8dc1c60fe66e5f67c149fda9f6738fd71fdc01508f997eb3bdda1a70f764bf9" exitCode=0 Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.213563 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d5ff467f-r26hn" event={"ID":"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a","Type":"ContainerDied","Data":"c8dc1c60fe66e5f67c149fda9f6738fd71fdc01508f997eb3bdda1a70f764bf9"} Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.230083 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dcc64de1-66f2-48e6-969f-61aa68773678" (UID: "dcc64de1-66f2-48e6-969f-61aa68773678"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.267182 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-config-data" (OuterVolumeSpecName: "config-data") pod "dcc64de1-66f2-48e6-969f-61aa68773678" (UID: "dcc64de1-66f2-48e6-969f-61aa68773678"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.267440 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c679cf09-1426-4fc7-85c6-b6be6cfb6153-public-tls-certs\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.275411 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c679cf09-1426-4fc7-85c6-b6be6cfb6153-scripts\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.275685 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c679cf09-1426-4fc7-85c6-b6be6cfb6153-logs\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.276664 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c679cf09-1426-4fc7-85c6-b6be6cfb6153-combined-ca-bundle\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.276918 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c679cf09-1426-4fc7-85c6-b6be6cfb6153-config-data\") pod \"placement-7b4cc7844-8vpdw\" (UID: 
\"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.322236 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gw55\" (UniqueName: \"kubernetes.io/projected/c679cf09-1426-4fc7-85c6-b6be6cfb6153-kube-api-access-6gw55\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.322553 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c679cf09-1426-4fc7-85c6-b6be6cfb6153-internal-tls-certs\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.322728 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.322744 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.322756 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcc64de1-66f2-48e6-969f-61aa68773678-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.425924 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c679cf09-1426-4fc7-85c6-b6be6cfb6153-public-tls-certs\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.426863 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c679cf09-1426-4fc7-85c6-b6be6cfb6153-scripts\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.426940 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c679cf09-1426-4fc7-85c6-b6be6cfb6153-logs\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.426970 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c679cf09-1426-4fc7-85c6-b6be6cfb6153-combined-ca-bundle\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.427031 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c679cf09-1426-4fc7-85c6-b6be6cfb6153-config-data\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " 
pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.427074 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gw55\" (UniqueName: \"kubernetes.io/projected/c679cf09-1426-4fc7-85c6-b6be6cfb6153-kube-api-access-6gw55\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.427282 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c679cf09-1426-4fc7-85c6-b6be6cfb6153-internal-tls-certs\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.430374 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c679cf09-1426-4fc7-85c6-b6be6cfb6153-logs\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.453987 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c679cf09-1426-4fc7-85c6-b6be6cfb6153-internal-tls-certs\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.464334 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c679cf09-1426-4fc7-85c6-b6be6cfb6153-public-tls-certs\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.464842 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c679cf09-1426-4fc7-85c6-b6be6cfb6153-combined-ca-bundle\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.478203 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c679cf09-1426-4fc7-85c6-b6be6cfb6153-config-data\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.478528 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c679cf09-1426-4fc7-85c6-b6be6cfb6153-scripts\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.488917 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gw55\" (UniqueName: \"kubernetes.io/projected/c679cf09-1426-4fc7-85c6-b6be6cfb6153-kube-api-access-6gw55\") pod \"placement-7b4cc7844-8vpdw\" (UID: \"c679cf09-1426-4fc7-85c6-b6be6cfb6153\") " pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.498791 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-59d5ff467f-r26hn" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.529875 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-ovsdbserver-nb\") pod \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.530034 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-dns-svc\") pod \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.530227 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-df8cp\" (UniqueName: \"kubernetes.io/projected/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-kube-api-access-df8cp\") pod \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.530467 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-dns-swift-storage-0\") pod \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.530610 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-config\") pod \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.530701 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-ovsdbserver-sb\") pod \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\" (UID: \"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a\") " Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.546436 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-kube-api-access-df8cp" (OuterVolumeSpecName: "kube-api-access-df8cp") pod "aaa11b8e-bf0e-4b41-9db5-37eb4389e22a" (UID: "aaa11b8e-bf0e-4b41-9db5-37eb4389e22a"). InnerVolumeSpecName "kube-api-access-df8cp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.683670 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.699499 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-df8cp\" (UniqueName: \"kubernetes.io/projected/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-kube-api-access-df8cp\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.747168 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:18 crc kubenswrapper[4877]: W0128 17:01:18.750347 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb043f173_17ee_445b_a64b_ac750304a5ff.slice/crio-85a8d185293306515436187d8586e61c25c0a813a21e5be2835cc48995f2f6c0 WatchSource:0}: Error finding container 85a8d185293306515436187d8586e61c25c0a813a21e5be2835cc48995f2f6c0: Status 404 returned error can't find the container with id 85a8d185293306515436187d8586e61c25c0a813a21e5be2835cc48995f2f6c0 Jan 28 17:01:18 crc kubenswrapper[4877]: I0128 17:01:18.990429 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "aaa11b8e-bf0e-4b41-9db5-37eb4389e22a" (UID: "aaa11b8e-bf0e-4b41-9db5-37eb4389e22a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.001691 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "aaa11b8e-bf0e-4b41-9db5-37eb4389e22a" (UID: "aaa11b8e-bf0e-4b41-9db5-37eb4389e22a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.015494 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.015523 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.094837 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "aaa11b8e-bf0e-4b41-9db5-37eb4389e22a" (UID: "aaa11b8e-bf0e-4b41-9db5-37eb4389e22a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.095551 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-config" (OuterVolumeSpecName: "config") pod "aaa11b8e-bf0e-4b41-9db5-37eb4389e22a" (UID: "aaa11b8e-bf0e-4b41-9db5-37eb4389e22a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.118210 4877 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.118280 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-config\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.126767 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69c986f6d7-bls2q"] Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.266987 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69c986f6d7-bls2q" event={"ID":"55414a13-6cc8-42cb-bb48-610740b92289","Type":"ContainerStarted","Data":"e21f6db4efbc1e331291924b4b319920e096b39267594c9ddda602eadc839770"} Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.270274 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "aaa11b8e-bf0e-4b41-9db5-37eb4389e22a" (UID: "aaa11b8e-bf0e-4b41-9db5-37eb4389e22a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.310495 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"747d3fca-be49-419c-b42f-e746edee5eda","Type":"ContainerStarted","Data":"e564c705618b35908169daca5c07bc8002963487b4e0b47ffc0c6e119508d5a2"} Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.348900 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.386595 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vg4fw" event={"ID":"fad3a099-fcee-4a91-9de7-c67834a9743c","Type":"ContainerStarted","Data":"b3a91866f1e5bb57d0931f88b7b1e3e600b8d548349a14117d9eef05e850e17f"} Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.434713 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29493661-94ck2" event={"ID":"ece13606-7152-4723-a482-b27d1cf022d5","Type":"ContainerDied","Data":"6b0fd9395a7fd99c1788aa32a167f5cd0e07bd320144b2733611c4f892cea2b5"} Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.435064 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b0fd9395a7fd99c1788aa32a167f5cd0e07bd320144b2733611c4f892cea2b5" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.437332 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.463224 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29493661-94ck2" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.473429 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d5ff467f-r26hn" event={"ID":"aaa11b8e-bf0e-4b41-9db5-37eb4389e22a","Type":"ContainerDied","Data":"903b52c16fdafe274c4bc8eff90a1654df7e7c9de5d12a953aa3e84c2bb3eb48"} Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.473593 4877 scope.go:117] "RemoveContainer" containerID="c8dc1c60fe66e5f67c149fda9f6738fd71fdc01508f997eb3bdda1a70f764bf9" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.473822 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59d5ff467f-r26hn" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.511715 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vg4fw" podStartSLOduration=16.174976308 podStartE2EDuration="22.511674402s" podCreationTimestamp="2026-01-28 17:00:57 +0000 UTC" firstStartedPulling="2026-01-28 17:01:11.601986877 +0000 UTC m=+1575.160313765" lastFinishedPulling="2026-01-28 17:01:17.938684971 +0000 UTC m=+1581.497011859" observedRunningTime="2026-01-28 17:01:19.40957868 +0000 UTC m=+1582.967905568" watchObservedRunningTime="2026-01-28 17:01:19.511674402 +0000 UTC m=+1583.070001290" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.516447 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b043f173-17ee-445b-a64b-ac750304a5ff","Type":"ContainerStarted","Data":"85a8d185293306515436187d8586e61c25c0a813a21e5be2835cc48995f2f6c0"} Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.560581 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-combined-ca-bundle\") pod \"ece13606-7152-4723-a482-b27d1cf022d5\" (UID: \"ece13606-7152-4723-a482-b27d1cf022d5\") " Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.561489 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-fernet-keys\") pod \"ece13606-7152-4723-a482-b27d1cf022d5\" (UID: \"ece13606-7152-4723-a482-b27d1cf022d5\") " Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.561626 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gq2f6\" (UniqueName: \"kubernetes.io/projected/ece13606-7152-4723-a482-b27d1cf022d5-kube-api-access-gq2f6\") pod \"ece13606-7152-4723-a482-b27d1cf022d5\" (UID: \"ece13606-7152-4723-a482-b27d1cf022d5\") " Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.572636 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ece13606-7152-4723-a482-b27d1cf022d5" (UID: "ece13606-7152-4723-a482-b27d1cf022d5"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.584345 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-config-data\") pod \"ece13606-7152-4723-a482-b27d1cf022d5\" (UID: \"ece13606-7152-4723-a482-b27d1cf022d5\") " Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.585733 4877 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.619937 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ece13606-7152-4723-a482-b27d1cf022d5-kube-api-access-gq2f6" (OuterVolumeSpecName: "kube-api-access-gq2f6") pod "ece13606-7152-4723-a482-b27d1cf022d5" (UID: "ece13606-7152-4723-a482-b27d1cf022d5"). InnerVolumeSpecName "kube-api-access-gq2f6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.707342 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gq2f6\" (UniqueName: \"kubernetes.io/projected/ece13606-7152-4723-a482-b27d1cf022d5-kube-api-access-gq2f6\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.747542 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59d5ff467f-r26hn"] Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.782577 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-59d5ff467f-r26hn"] Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.808505 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7b4cc7844-8vpdw"] Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.816282 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ece13606-7152-4723-a482-b27d1cf022d5" (UID: "ece13606-7152-4723-a482-b27d1cf022d5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.899234 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-config-data" (OuterVolumeSpecName: "config-data") pod "ece13606-7152-4723-a482-b27d1cf022d5" (UID: "ece13606-7152-4723-a482-b27d1cf022d5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.912920 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:19 crc kubenswrapper[4877]: I0128 17:01:19.913151 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ece13606-7152-4723-a482-b27d1cf022d5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.103464 4877 scope.go:117] "RemoveContainer" containerID="bb6a8d1a93768af9792ca9fc0a4274d4ad82276e9fa81bc3dbaaf6dea8760ac3" Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.336083 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:01:20 crc kubenswrapper[4877]: E0128 17:01:20.336708 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.449673 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.465094 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.465138 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.466520 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.466590 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.615837 4877 generic.go:334] "Generic (PLEG): container finished" podID="55414a13-6cc8-42cb-bb48-610740b92289" containerID="bebab129e5af01b9219c90b9acda44835cbf55e99559ebd59ee7984c427a9eff" exitCode=0 Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.615952 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69c986f6d7-bls2q" event={"ID":"55414a13-6cc8-42cb-bb48-610740b92289","Type":"ContainerDied","Data":"bebab129e5af01b9219c90b9acda44835cbf55e99559ebd59ee7984c427a9eff"} Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.617354 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.627813 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7b4cc7844-8vpdw" event={"ID":"c679cf09-1426-4fc7-85c6-b6be6cfb6153","Type":"ContainerStarted","Data":"d184968cf6e0ac038a8b5d7600d990ea7a2bfce08bbe9ae00fdf21aab8b05463"} Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.667735 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"1e203aba-b679-45b3-9987-8a63bdb556db","Type":"ContainerStarted","Data":"320fb0fa31dafc6b492eefc6c9de42861f0448a04929bf5d23fd630e4c7429be"} Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.671092 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.672267 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29493661-94ck2" Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.678259 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.684558 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.686149 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 28 17:01:20 crc kubenswrapper[4877]: I0128 17:01:20.822267 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 28 17:01:21 crc kubenswrapper[4877]: I0128 17:01:21.402843 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aaa11b8e-bf0e-4b41-9db5-37eb4389e22a" path="/var/lib/kubelet/pods/aaa11b8e-bf0e-4b41-9db5-37eb4389e22a/volumes" Jan 28 17:01:21 crc kubenswrapper[4877]: I0128 17:01:21.740558 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7b4cc7844-8vpdw" event={"ID":"c679cf09-1426-4fc7-85c6-b6be6cfb6153","Type":"ContainerStarted","Data":"79e06e8aa1fefd9f06da23010045385b412de7b8756dfbc0e32611c319ddcfaa"} Jan 28 17:01:21 crc kubenswrapper[4877]: I0128 17:01:21.741530 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 28 17:01:21 crc kubenswrapper[4877]: I0128 17:01:21.742318 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:22 crc kubenswrapper[4877]: I0128 17:01:22.287210 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-94bdfbbc4-6n5dr" podUID="ae4830b8-7964-4686-a784-e357a560ec78" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.196:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 17:01:22 crc kubenswrapper[4877]: I0128 17:01:22.287995 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-94bdfbbc4-6n5dr" podUID="ae4830b8-7964-4686-a784-e357a560ec78" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.196:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 17:01:22 crc kubenswrapper[4877]: I0128 17:01:22.766154 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1e203aba-b679-45b3-9987-8a63bdb556db","Type":"ContainerStarted","Data":"004c071c8c4b36125f906dc9bbb5e9889e65a4a289c9102dc9997c3a2273345d"} Jan 28 17:01:22 crc kubenswrapper[4877]: I0128 17:01:22.781944 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b043f173-17ee-445b-a64b-ac750304a5ff","Type":"ContainerStarted","Data":"fff1bfd931e61d21bb174b32bda485ae4952e2a7774b40161375e0c677adb454"} 
Jan 28 17:01:22 crc kubenswrapper[4877]: I0128 17:01:22.788373 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69c986f6d7-bls2q" event={"ID":"55414a13-6cc8-42cb-bb48-610740b92289","Type":"ContainerStarted","Data":"ce4ec7bcf353c45f6950bb0d9ef9f7033b93e11f9430a850d977be89392f4b7f"} Jan 28 17:01:22 crc kubenswrapper[4877]: I0128 17:01:22.788523 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-69c986f6d7-bls2q" Jan 28 17:01:22 crc kubenswrapper[4877]: I0128 17:01:22.817373 4877 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 17:01:22 crc kubenswrapper[4877]: I0128 17:01:22.818557 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7b4cc7844-8vpdw" event={"ID":"c679cf09-1426-4fc7-85c6-b6be6cfb6153","Type":"ContainerStarted","Data":"b800ba668bfefed9c6694fc8d29f526d254d489eea4d398f19e3b5aec3af53fd"} Jan 28 17:01:22 crc kubenswrapper[4877]: I0128 17:01:22.818678 4877 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 17:01:22 crc kubenswrapper[4877]: I0128 17:01:22.819287 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-69c986f6d7-bls2q" podStartSLOduration=5.819265892 podStartE2EDuration="5.819265892s" podCreationTimestamp="2026-01-28 17:01:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:22.816571859 +0000 UTC m=+1586.374898757" watchObservedRunningTime="2026-01-28 17:01:22.819265892 +0000 UTC m=+1586.377592780" Jan 28 17:01:22 crc kubenswrapper[4877]: I0128 17:01:22.887832 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-7b4cc7844-8vpdw" podStartSLOduration=5.887800922 podStartE2EDuration="5.887800922s" podCreationTimestamp="2026-01-28 17:01:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:22.850619423 +0000 UTC m=+1586.408946331" watchObservedRunningTime="2026-01-28 17:01:22.887800922 +0000 UTC m=+1586.446127810" Jan 28 17:01:23 crc kubenswrapper[4877]: I0128 17:01:23.089631 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-59d5ff467f-r26hn" podUID="aaa11b8e-bf0e-4b41-9db5-37eb4389e22a" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.195:5353: i/o timeout" Jan 28 17:01:23 crc kubenswrapper[4877]: I0128 17:01:23.136995 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-94bdfbbc4-6n5dr" Jan 28 17:01:23 crc kubenswrapper[4877]: I0128 17:01:23.161234 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-94bdfbbc4-6n5dr" Jan 28 17:01:23 crc kubenswrapper[4877]: I0128 17:01:23.838556 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1e203aba-b679-45b3-9987-8a63bdb556db","Type":"ContainerStarted","Data":"45d34b4d7c70dddd2467b9e38385da961dad5b64f625d55daf6fd4518f74ce84"} Jan 28 17:01:23 crc kubenswrapper[4877]: I0128 17:01:23.839130 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="1e203aba-b679-45b3-9987-8a63bdb556db" containerName="cinder-api-log" containerID="cri-o://004c071c8c4b36125f906dc9bbb5e9889e65a4a289c9102dc9997c3a2273345d" gracePeriod=30 Jan 28 
17:01:23 crc kubenswrapper[4877]: I0128 17:01:23.839885 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 28 17:01:23 crc kubenswrapper[4877]: I0128 17:01:23.840193 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="1e203aba-b679-45b3-9987-8a63bdb556db" containerName="cinder-api" containerID="cri-o://45d34b4d7c70dddd2467b9e38385da961dad5b64f625d55daf6fd4518f74ce84" gracePeriod=30 Jan 28 17:01:23 crc kubenswrapper[4877]: I0128 17:01:23.848609 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b043f173-17ee-445b-a64b-ac750304a5ff","Type":"ContainerStarted","Data":"0fc1e365f3c31c663d06a833d418b716b9b1ab21f78c52542861b21aab96e9c6"} Jan 28 17:01:23 crc kubenswrapper[4877]: I0128 17:01:23.892789 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"747d3fca-be49-419c-b42f-e746edee5eda","Type":"ContainerStarted","Data":"e1a954fc4dec44449b637dbe069f2a5504ea7a74d9787dbddccf239fcaea4f21"} Jan 28 17:01:23 crc kubenswrapper[4877]: I0128 17:01:23.894910 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 17:01:23 crc kubenswrapper[4877]: I0128 17:01:23.895222 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:23 crc kubenswrapper[4877]: I0128 17:01:23.895288 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:23 crc kubenswrapper[4877]: I0128 17:01:23.895343 4877 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 17:01:23 crc kubenswrapper[4877]: I0128 17:01:23.895355 4877 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 17:01:23 crc kubenswrapper[4877]: I0128 17:01:23.905721 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=5.519297347 podStartE2EDuration="6.905702089s" podCreationTimestamp="2026-01-28 17:01:17 +0000 UTC" firstStartedPulling="2026-01-28 17:01:18.77820938 +0000 UTC m=+1582.336536268" lastFinishedPulling="2026-01-28 17:01:20.164614122 +0000 UTC m=+1583.722941010" observedRunningTime="2026-01-28 17:01:23.905443941 +0000 UTC m=+1587.463770849" watchObservedRunningTime="2026-01-28 17:01:23.905702089 +0000 UTC m=+1587.464028967" Jan 28 17:01:23 crc kubenswrapper[4877]: I0128 17:01:23.905828 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=6.905824282 podStartE2EDuration="6.905824282s" podCreationTimestamp="2026-01-28 17:01:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:23.865592632 +0000 UTC m=+1587.423919540" watchObservedRunningTime="2026-01-28 17:01:23.905824282 +0000 UTC m=+1587.464151170" Jan 28 17:01:24 crc kubenswrapper[4877]: I0128 17:01:24.070289 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.880532169 podStartE2EDuration="13.055030968s" podCreationTimestamp="2026-01-28 17:01:11 +0000 UTC" firstStartedPulling="2026-01-28 17:01:12.845100091 +0000 UTC m=+1576.403426979" lastFinishedPulling="2026-01-28 17:01:23.01959889 +0000 UTC m=+1586.577925778" observedRunningTime="2026-01-28 
17:01:23.98618747 +0000 UTC m=+1587.544514368" watchObservedRunningTime="2026-01-28 17:01:24.055030968 +0000 UTC m=+1587.613357856" Jan 28 17:01:24 crc kubenswrapper[4877]: I0128 17:01:24.917876 4877 generic.go:334] "Generic (PLEG): container finished" podID="1e203aba-b679-45b3-9987-8a63bdb556db" containerID="004c071c8c4b36125f906dc9bbb5e9889e65a4a289c9102dc9997c3a2273345d" exitCode=143 Jan 28 17:01:24 crc kubenswrapper[4877]: I0128 17:01:24.917984 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1e203aba-b679-45b3-9987-8a63bdb556db","Type":"ContainerDied","Data":"004c071c8c4b36125f906dc9bbb5e9889e65a4a289c9102dc9997c3a2273345d"} Jan 28 17:01:25 crc kubenswrapper[4877]: I0128 17:01:25.290362 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7559f7bb56-qgpxn" Jan 28 17:01:25 crc kubenswrapper[4877]: I0128 17:01:25.636902 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7559f7bb56-qgpxn" Jan 28 17:01:25 crc kubenswrapper[4877]: I0128 17:01:25.738749 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-94bdfbbc4-6n5dr"] Jan 28 17:01:25 crc kubenswrapper[4877]: I0128 17:01:25.742793 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-94bdfbbc4-6n5dr" podUID="ae4830b8-7964-4686-a784-e357a560ec78" containerName="barbican-api-log" containerID="cri-o://840c9aa34a4c1f823251d2f2c2a053f2400e1b991e11f61f3eebbb199297baa9" gracePeriod=30 Jan 28 17:01:25 crc kubenswrapper[4877]: I0128 17:01:25.743415 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-94bdfbbc4-6n5dr" podUID="ae4830b8-7964-4686-a784-e357a560ec78" containerName="barbican-api" containerID="cri-o://a0c8bae53352a39a8f8d3191159495884656ef26e377b4e4f8d35f5342fad204" gracePeriod=30 Jan 28 17:01:25 crc kubenswrapper[4877]: I0128 17:01:25.943003 4877 generic.go:334] "Generic (PLEG): container finished" podID="ae4830b8-7964-4686-a784-e357a560ec78" containerID="840c9aa34a4c1f823251d2f2c2a053f2400e1b991e11f61f3eebbb199297baa9" exitCode=143 Jan 28 17:01:25 crc kubenswrapper[4877]: I0128 17:01:25.943785 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-94bdfbbc4-6n5dr" event={"ID":"ae4830b8-7964-4686-a784-e357a560ec78","Type":"ContainerDied","Data":"840c9aa34a4c1f823251d2f2c2a053f2400e1b991e11f61f3eebbb199297baa9"} Jan 28 17:01:26 crc kubenswrapper[4877]: I0128 17:01:26.675642 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:26 crc kubenswrapper[4877]: I0128 17:01:26.676162 4877 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 17:01:26 crc kubenswrapper[4877]: I0128 17:01:26.677100 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:26 crc kubenswrapper[4877]: I0128 17:01:26.782158 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 28 17:01:26 crc kubenswrapper[4877]: I0128 17:01:26.782269 4877 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 17:01:26 crc kubenswrapper[4877]: I0128 17:01:26.785550 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 28 17:01:27 crc 
kubenswrapper[4877]: I0128 17:01:27.565462 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 28 17:01:27 crc kubenswrapper[4877]: I0128 17:01:27.778625 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-85cd7458d6-gb267" Jan 28 17:01:27 crc kubenswrapper[4877]: I0128 17:01:27.917396 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="b043f173-17ee-445b-a64b-ac750304a5ff" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 17:01:27 crc kubenswrapper[4877]: I0128 17:01:27.939316 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-69c986f6d7-bls2q" Jan 28 17:01:28 crc kubenswrapper[4877]: I0128 17:01:28.053611 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-b72zf"] Jan 28 17:01:28 crc kubenswrapper[4877]: I0128 17:01:28.053993 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" podUID="9efc1e14-125d-467c-ae5e-b124a75d455d" containerName="dnsmasq-dns" containerID="cri-o://05d8953cd46e2af9ce5e6a4072e52fba4ea4dc43c96e52aaffbe958d3da257f2" gracePeriod=10 Jan 28 17:01:28 crc kubenswrapper[4877]: I0128 17:01:28.205337 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vg4fw" Jan 28 17:01:28 crc kubenswrapper[4877]: I0128 17:01:28.207760 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vg4fw" Jan 28 17:01:28 crc kubenswrapper[4877]: I0128 17:01:28.293519 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vg4fw" Jan 28 17:01:28 crc kubenswrapper[4877]: I0128 17:01:28.822658 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-b72zf"
Jan 28 17:01:28 crc kubenswrapper[4877]: I0128 17:01:28.942902 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-config\") pod \"9efc1e14-125d-467c-ae5e-b124a75d455d\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") "
Jan 28 17:01:28 crc kubenswrapper[4877]: I0128 17:01:28.942996 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-ovsdbserver-nb\") pod \"9efc1e14-125d-467c-ae5e-b124a75d455d\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") "
Jan 28 17:01:28 crc kubenswrapper[4877]: I0128 17:01:28.943374 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-dns-swift-storage-0\") pod \"9efc1e14-125d-467c-ae5e-b124a75d455d\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") "
Jan 28 17:01:28 crc kubenswrapper[4877]: I0128 17:01:28.943550 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-ovsdbserver-sb\") pod \"9efc1e14-125d-467c-ae5e-b124a75d455d\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") "
Jan 28 17:01:28 crc kubenswrapper[4877]: I0128 17:01:28.943667 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-dns-svc\") pod \"9efc1e14-125d-467c-ae5e-b124a75d455d\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") "
Jan 28 17:01:28 crc kubenswrapper[4877]: I0128 17:01:28.943740 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nm5hl\" (UniqueName: \"kubernetes.io/projected/9efc1e14-125d-467c-ae5e-b124a75d455d-kube-api-access-nm5hl\") pod \"9efc1e14-125d-467c-ae5e-b124a75d455d\" (UID: \"9efc1e14-125d-467c-ae5e-b124a75d455d\") "
Jan 28 17:01:28 crc kubenswrapper[4877]: I0128 17:01:28.955965 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9efc1e14-125d-467c-ae5e-b124a75d455d-kube-api-access-nm5hl" (OuterVolumeSpecName: "kube-api-access-nm5hl") pod "9efc1e14-125d-467c-ae5e-b124a75d455d" (UID: "9efc1e14-125d-467c-ae5e-b124a75d455d"). InnerVolumeSpecName "kube-api-access-nm5hl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.013150 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9efc1e14-125d-467c-ae5e-b124a75d455d" (UID: "9efc1e14-125d-467c-ae5e-b124a75d455d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.016899 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-94bdfbbc4-6n5dr" podUID="ae4830b8-7964-4686-a784-e357a560ec78" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.196:9311/healthcheck\": read tcp 10.217.0.2:44018->10.217.0.196:9311: read: connection reset by peer"
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.017439 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-94bdfbbc4-6n5dr" podUID="ae4830b8-7964-4686-a784-e357a560ec78" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.196:9311/healthcheck\": read tcp 10.217.0.2:44004->10.217.0.196:9311: read: connection reset by peer"
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.048616 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.049230 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nm5hl\" (UniqueName: \"kubernetes.io/projected/9efc1e14-125d-467c-ae5e-b124a75d455d-kube-api-access-nm5hl\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.049299 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9efc1e14-125d-467c-ae5e-b124a75d455d" (UID: "9efc1e14-125d-467c-ae5e-b124a75d455d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.056124 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9efc1e14-125d-467c-ae5e-b124a75d455d" (UID: "9efc1e14-125d-467c-ae5e-b124a75d455d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.066143 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9efc1e14-125d-467c-ae5e-b124a75d455d" (UID: "9efc1e14-125d-467c-ae5e-b124a75d455d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.069014 4877 generic.go:334] "Generic (PLEG): container finished" podID="9efc1e14-125d-467c-ae5e-b124a75d455d" containerID="05d8953cd46e2af9ce5e6a4072e52fba4ea4dc43c96e52aaffbe958d3da257f2" exitCode=0
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.069124 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-b72zf"
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.069157 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" event={"ID":"9efc1e14-125d-467c-ae5e-b124a75d455d","Type":"ContainerDied","Data":"05d8953cd46e2af9ce5e6a4072e52fba4ea4dc43c96e52aaffbe958d3da257f2"}
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.074078 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-b72zf" event={"ID":"9efc1e14-125d-467c-ae5e-b124a75d455d","Type":"ContainerDied","Data":"2d24dbd728ef68cd283ebc5c21bcb772db95c6997cb3ec57ea1145b1b78f2211"}
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.074114 4877 scope.go:117] "RemoveContainer" containerID="05d8953cd46e2af9ce5e6a4072e52fba4ea4dc43c96e52aaffbe958d3da257f2"
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.099400 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-config" (OuterVolumeSpecName: "config") pod "9efc1e14-125d-467c-ae5e-b124a75d455d" (UID: "9efc1e14-125d-467c-ae5e-b124a75d455d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.142175 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vg4fw"
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.151400 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.151439 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-config\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.151452 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.151463 4877 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9efc1e14-125d-467c-ae5e-b124a75d455d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.206001 4877 scope.go:117] "RemoveContainer" containerID="dcba3110f33020969dd4d3c1084f8ff4351539536854437c38dac9d7ad44b285"
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.248156 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vg4fw"]
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.282271 4877 scope.go:117] "RemoveContainer" containerID="05d8953cd46e2af9ce5e6a4072e52fba4ea4dc43c96e52aaffbe958d3da257f2"
Jan 28 17:01:29 crc kubenswrapper[4877]: E0128 17:01:29.283676 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05d8953cd46e2af9ce5e6a4072e52fba4ea4dc43c96e52aaffbe958d3da257f2\": container with ID starting with 05d8953cd46e2af9ce5e6a4072e52fba4ea4dc43c96e52aaffbe958d3da257f2 not found: ID does not exist" containerID="05d8953cd46e2af9ce5e6a4072e52fba4ea4dc43c96e52aaffbe958d3da257f2"
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.283737 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05d8953cd46e2af9ce5e6a4072e52fba4ea4dc43c96e52aaffbe958d3da257f2"} err="failed to get container status \"05d8953cd46e2af9ce5e6a4072e52fba4ea4dc43c96e52aaffbe958d3da257f2\": rpc error: code = NotFound desc = could not find container \"05d8953cd46e2af9ce5e6a4072e52fba4ea4dc43c96e52aaffbe958d3da257f2\": container with ID starting with 05d8953cd46e2af9ce5e6a4072e52fba4ea4dc43c96e52aaffbe958d3da257f2 not found: ID does not exist"
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.283773 4877 scope.go:117] "RemoveContainer" containerID="dcba3110f33020969dd4d3c1084f8ff4351539536854437c38dac9d7ad44b285"
Jan 28 17:01:29 crc kubenswrapper[4877]: E0128 17:01:29.284609 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcba3110f33020969dd4d3c1084f8ff4351539536854437c38dac9d7ad44b285\": container with ID starting with dcba3110f33020969dd4d3c1084f8ff4351539536854437c38dac9d7ad44b285 not found: ID does not exist" containerID="dcba3110f33020969dd4d3c1084f8ff4351539536854437c38dac9d7ad44b285"
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.284718 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcba3110f33020969dd4d3c1084f8ff4351539536854437c38dac9d7ad44b285"} err="failed to get container status \"dcba3110f33020969dd4d3c1084f8ff4351539536854437c38dac9d7ad44b285\": rpc error: code = NotFound desc = could not find container \"dcba3110f33020969dd4d3c1084f8ff4351539536854437c38dac9d7ad44b285\": container with ID starting with dcba3110f33020969dd4d3c1084f8ff4351539536854437c38dac9d7ad44b285 not found: ID does not exist"
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.481617 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-b72zf"]
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.505691 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-b72zf"]
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.797408 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.875803 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrn4\" (UniqueName: \"kubernetes.io/projected/ae4830b8-7964-4686-a784-e357a560ec78-kube-api-access-mnrn4\") pod \"ae4830b8-7964-4686-a784-e357a560ec78\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") "
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.876167 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae4830b8-7964-4686-a784-e357a560ec78-logs\") pod \"ae4830b8-7964-4686-a784-e357a560ec78\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") "
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.876249 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-combined-ca-bundle\") pod \"ae4830b8-7964-4686-a784-e357a560ec78\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") "
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.876336 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-config-data-custom\") pod \"ae4830b8-7964-4686-a784-e357a560ec78\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") "
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.876397 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-config-data\") pod \"ae4830b8-7964-4686-a784-e357a560ec78\" (UID: \"ae4830b8-7964-4686-a784-e357a560ec78\") "
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.876633 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae4830b8-7964-4686-a784-e357a560ec78-logs" (OuterVolumeSpecName: "logs") pod "ae4830b8-7964-4686-a784-e357a560ec78" (UID: "ae4830b8-7964-4686-a784-e357a560ec78"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.877121 4877 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae4830b8-7964-4686-a784-e357a560ec78-logs\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.888821 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae4830b8-7964-4686-a784-e357a560ec78-kube-api-access-mnrn4" (OuterVolumeSpecName: "kube-api-access-mnrn4") pod "ae4830b8-7964-4686-a784-e357a560ec78" (UID: "ae4830b8-7964-4686-a784-e357a560ec78"). InnerVolumeSpecName "kube-api-access-mnrn4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.893136 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ae4830b8-7964-4686-a784-e357a560ec78" (UID: "ae4830b8-7964-4686-a784-e357a560ec78"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.921432 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ae4830b8-7964-4686-a784-e357a560ec78" (UID: "ae4830b8-7964-4686-a784-e357a560ec78"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.966128 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-config-data" (OuterVolumeSpecName: "config-data") pod "ae4830b8-7964-4686-a784-e357a560ec78" (UID: "ae4830b8-7964-4686-a784-e357a560ec78"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.979803 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrn4\" (UniqueName: \"kubernetes.io/projected/ae4830b8-7964-4686-a784-e357a560ec78-kube-api-access-mnrn4\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.979835 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.979844 4877 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:29 crc kubenswrapper[4877]: I0128 17:01:29.979855 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae4830b8-7964-4686-a784-e357a560ec78-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.098499 4877 generic.go:334] "Generic (PLEG): container finished" podID="ae4830b8-7964-4686-a784-e357a560ec78" containerID="a0c8bae53352a39a8f8d3191159495884656ef26e377b4e4f8d35f5342fad204" exitCode=0
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.098602 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-94bdfbbc4-6n5dr" event={"ID":"ae4830b8-7964-4686-a784-e357a560ec78","Type":"ContainerDied","Data":"a0c8bae53352a39a8f8d3191159495884656ef26e377b4e4f8d35f5342fad204"}
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.098657 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-94bdfbbc4-6n5dr" event={"ID":"ae4830b8-7964-4686-a784-e357a560ec78","Type":"ContainerDied","Data":"e80d7ea3cda37f0194d0ca86cd98c318db77441922dd1a031591ebcb71872690"}
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.098683 4877 scope.go:117] "RemoveContainer" containerID="a0c8bae53352a39a8f8d3191159495884656ef26e377b4e4f8d35f5342fad204"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.098879 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-94bdfbbc4-6n5dr"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.143903 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-94bdfbbc4-6n5dr"]
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.158761 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-94bdfbbc4-6n5dr"]
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.162465 4877 scope.go:117] "RemoveContainer" containerID="840c9aa34a4c1f823251d2f2c2a053f2400e1b991e11f61f3eebbb199297baa9"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.196800 4877 scope.go:117] "RemoveContainer" containerID="a0c8bae53352a39a8f8d3191159495884656ef26e377b4e4f8d35f5342fad204"
Jan 28 17:01:30 crc kubenswrapper[4877]: E0128 17:01:30.197566 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0c8bae53352a39a8f8d3191159495884656ef26e377b4e4f8d35f5342fad204\": container with ID starting with a0c8bae53352a39a8f8d3191159495884656ef26e377b4e4f8d35f5342fad204 not found: ID does not exist" containerID="a0c8bae53352a39a8f8d3191159495884656ef26e377b4e4f8d35f5342fad204"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.197652 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0c8bae53352a39a8f8d3191159495884656ef26e377b4e4f8d35f5342fad204"} err="failed to get container status \"a0c8bae53352a39a8f8d3191159495884656ef26e377b4e4f8d35f5342fad204\": rpc error: code = NotFound desc = could not find container \"a0c8bae53352a39a8f8d3191159495884656ef26e377b4e4f8d35f5342fad204\": container with ID starting with a0c8bae53352a39a8f8d3191159495884656ef26e377b4e4f8d35f5342fad204 not found: ID does not exist"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.197737 4877 scope.go:117] "RemoveContainer" containerID="840c9aa34a4c1f823251d2f2c2a053f2400e1b991e11f61f3eebbb199297baa9"
Jan 28 17:01:30 crc kubenswrapper[4877]: E0128 17:01:30.198298 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"840c9aa34a4c1f823251d2f2c2a053f2400e1b991e11f61f3eebbb199297baa9\": container with ID starting with 840c9aa34a4c1f823251d2f2c2a053f2400e1b991e11f61f3eebbb199297baa9 not found: ID does not exist" containerID="840c9aa34a4c1f823251d2f2c2a053f2400e1b991e11f61f3eebbb199297baa9"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.198342 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"840c9aa34a4c1f823251d2f2c2a053f2400e1b991e11f61f3eebbb199297baa9"} err="failed to get container status \"840c9aa34a4c1f823251d2f2c2a053f2400e1b991e11f61f3eebbb199297baa9\": rpc error: code = NotFound desc = could not find container \"840c9aa34a4c1f823251d2f2c2a053f2400e1b991e11f61f3eebbb199297baa9\": container with ID starting with 840c9aa34a4c1f823251d2f2c2a053f2400e1b991e11f61f3eebbb199297baa9 not found: ID does not exist"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.366406 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Jan 28 17:01:30 crc kubenswrapper[4877]: E0128 17:01:30.366937 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9efc1e14-125d-467c-ae5e-b124a75d455d" containerName="dnsmasq-dns"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.366954 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="9efc1e14-125d-467c-ae5e-b124a75d455d" containerName="dnsmasq-dns"
Jan 28 17:01:30 crc kubenswrapper[4877]: E0128 17:01:30.366978 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ece13606-7152-4723-a482-b27d1cf022d5" containerName="keystone-cron"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.366987 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="ece13606-7152-4723-a482-b27d1cf022d5" containerName="keystone-cron"
Jan 28 17:01:30 crc kubenswrapper[4877]: E0128 17:01:30.367001 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aaa11b8e-bf0e-4b41-9db5-37eb4389e22a" containerName="init"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.367007 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="aaa11b8e-bf0e-4b41-9db5-37eb4389e22a" containerName="init"
Jan 28 17:01:30 crc kubenswrapper[4877]: E0128 17:01:30.367016 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9efc1e14-125d-467c-ae5e-b124a75d455d" containerName="init"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.367022 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="9efc1e14-125d-467c-ae5e-b124a75d455d" containerName="init"
Jan 28 17:01:30 crc kubenswrapper[4877]: E0128 17:01:30.367035 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae4830b8-7964-4686-a784-e357a560ec78" containerName="barbican-api"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.367041 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae4830b8-7964-4686-a784-e357a560ec78" containerName="barbican-api"
Jan 28 17:01:30 crc kubenswrapper[4877]: E0128 17:01:30.367050 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae4830b8-7964-4686-a784-e357a560ec78" containerName="barbican-api-log"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.367055 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae4830b8-7964-4686-a784-e357a560ec78" containerName="barbican-api-log"
Jan 28 17:01:30 crc kubenswrapper[4877]: E0128 17:01:30.367074 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aaa11b8e-bf0e-4b41-9db5-37eb4389e22a" containerName="dnsmasq-dns"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.367081 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="aaa11b8e-bf0e-4b41-9db5-37eb4389e22a" containerName="dnsmasq-dns"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.368126 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="9efc1e14-125d-467c-ae5e-b124a75d455d" containerName="dnsmasq-dns"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.368145 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="ece13606-7152-4723-a482-b27d1cf022d5" containerName="keystone-cron"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.368160 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae4830b8-7964-4686-a784-e357a560ec78" containerName="barbican-api"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.368171 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae4830b8-7964-4686-a784-e357a560ec78" containerName="barbican-api-log"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.368195 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="aaa11b8e-bf0e-4b41-9db5-37eb4389e22a" containerName="dnsmasq-dns"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.369009 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.372079 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.372248 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.374002 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-jxn6s"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.391319 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.510539 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdc7l\" (UniqueName: \"kubernetes.io/projected/e5410b8c-cff8-4df4-885f-e550cf3d6dfd-kube-api-access-fdc7l\") pod \"openstackclient\" (UID: \"e5410b8c-cff8-4df4-885f-e550cf3d6dfd\") " pod="openstack/openstackclient"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.510744 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5410b8c-cff8-4df4-885f-e550cf3d6dfd-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e5410b8c-cff8-4df4-885f-e550cf3d6dfd\") " pod="openstack/openstackclient"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.510947 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e5410b8c-cff8-4df4-885f-e550cf3d6dfd-openstack-config-secret\") pod \"openstackclient\" (UID: \"e5410b8c-cff8-4df4-885f-e550cf3d6dfd\") " pod="openstack/openstackclient"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.511178 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e5410b8c-cff8-4df4-885f-e550cf3d6dfd-openstack-config\") pod \"openstackclient\" (UID: \"e5410b8c-cff8-4df4-885f-e550cf3d6dfd\") " pod="openstack/openstackclient"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.613964 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdc7l\" (UniqueName: \"kubernetes.io/projected/e5410b8c-cff8-4df4-885f-e550cf3d6dfd-kube-api-access-fdc7l\") pod \"openstackclient\" (UID: \"e5410b8c-cff8-4df4-885f-e550cf3d6dfd\") " pod="openstack/openstackclient"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.614082 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5410b8c-cff8-4df4-885f-e550cf3d6dfd-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e5410b8c-cff8-4df4-885f-e550cf3d6dfd\") " pod="openstack/openstackclient"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.614164 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e5410b8c-cff8-4df4-885f-e550cf3d6dfd-openstack-config-secret\") pod \"openstackclient\" (UID: \"e5410b8c-cff8-4df4-885f-e550cf3d6dfd\") " pod="openstack/openstackclient"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.615065 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e5410b8c-cff8-4df4-885f-e550cf3d6dfd-openstack-config\") pod \"openstackclient\" (UID: \"e5410b8c-cff8-4df4-885f-e550cf3d6dfd\") " pod="openstack/openstackclient"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.615850 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e5410b8c-cff8-4df4-885f-e550cf3d6dfd-openstack-config\") pod \"openstackclient\" (UID: \"e5410b8c-cff8-4df4-885f-e550cf3d6dfd\") " pod="openstack/openstackclient"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.620803 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e5410b8c-cff8-4df4-885f-e550cf3d6dfd-openstack-config-secret\") pod \"openstackclient\" (UID: \"e5410b8c-cff8-4df4-885f-e550cf3d6dfd\") " pod="openstack/openstackclient"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.628141 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5410b8c-cff8-4df4-885f-e550cf3d6dfd-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e5410b8c-cff8-4df4-885f-e550cf3d6dfd\") " pod="openstack/openstackclient"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.635891 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdc7l\" (UniqueName: \"kubernetes.io/projected/e5410b8c-cff8-4df4-885f-e550cf3d6dfd-kube-api-access-fdc7l\") pod \"openstackclient\" (UID: \"e5410b8c-cff8-4df4-885f-e550cf3d6dfd\") " pod="openstack/openstackclient"
Jan 28 17:01:30 crc kubenswrapper[4877]: I0128 17:01:30.688954 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 28 17:01:31 crc kubenswrapper[4877]: I0128 17:01:31.114331 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vg4fw" podUID="fad3a099-fcee-4a91-9de7-c67834a9743c" containerName="registry-server" containerID="cri-o://b3a91866f1e5bb57d0931f88b7b1e3e600b8d548349a14117d9eef05e850e17f" gracePeriod=2
Jan 28 17:01:31 crc kubenswrapper[4877]: E0128 17:01:31.244414 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfad3a099_fcee_4a91_9de7_c67834a9743c.slice/crio-conmon-b3a91866f1e5bb57d0931f88b7b1e3e600b8d548349a14117d9eef05e850e17f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfad3a099_fcee_4a91_9de7_c67834a9743c.slice/crio-b3a91866f1e5bb57d0931f88b7b1e3e600b8d548349a14117d9eef05e850e17f.scope\": RecentStats: unable to find data in memory cache]"
Jan 28 17:01:31 crc kubenswrapper[4877]: I0128 17:01:31.325271 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Jan 28 17:01:31 crc kubenswrapper[4877]: I0128 17:01:31.363552 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9efc1e14-125d-467c-ae5e-b124a75d455d" path="/var/lib/kubelet/pods/9efc1e14-125d-467c-ae5e-b124a75d455d/volumes"
Jan 28 17:01:31 crc kubenswrapper[4877]: I0128 17:01:31.368155 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae4830b8-7964-4686-a784-e357a560ec78" path="/var/lib/kubelet/pods/ae4830b8-7964-4686-a784-e357a560ec78/volumes"
Jan 28 17:01:31 crc kubenswrapper[4877]: I0128 17:01:31.757923 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vg4fw"
Jan 28 17:01:31 crc kubenswrapper[4877]: I0128 17:01:31.843211 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fad3a099-fcee-4a91-9de7-c67834a9743c-utilities\") pod \"fad3a099-fcee-4a91-9de7-c67834a9743c\" (UID: \"fad3a099-fcee-4a91-9de7-c67834a9743c\") "
Jan 28 17:01:31 crc kubenswrapper[4877]: I0128 17:01:31.843266 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fad3a099-fcee-4a91-9de7-c67834a9743c-catalog-content\") pod \"fad3a099-fcee-4a91-9de7-c67834a9743c\" (UID: \"fad3a099-fcee-4a91-9de7-c67834a9743c\") "
Jan 28 17:01:31 crc kubenswrapper[4877]: I0128 17:01:31.843382 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dm59h\" (UniqueName: \"kubernetes.io/projected/fad3a099-fcee-4a91-9de7-c67834a9743c-kube-api-access-dm59h\") pod \"fad3a099-fcee-4a91-9de7-c67834a9743c\" (UID: \"fad3a099-fcee-4a91-9de7-c67834a9743c\") "
Jan 28 17:01:31 crc kubenswrapper[4877]: I0128 17:01:31.844585 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fad3a099-fcee-4a91-9de7-c67834a9743c-utilities" (OuterVolumeSpecName: "utilities") pod "fad3a099-fcee-4a91-9de7-c67834a9743c" (UID: "fad3a099-fcee-4a91-9de7-c67834a9743c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:01:31 crc kubenswrapper[4877]: I0128 17:01:31.852738 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fad3a099-fcee-4a91-9de7-c67834a9743c-kube-api-access-dm59h" (OuterVolumeSpecName: "kube-api-access-dm59h") pod "fad3a099-fcee-4a91-9de7-c67834a9743c" (UID: "fad3a099-fcee-4a91-9de7-c67834a9743c"). InnerVolumeSpecName "kube-api-access-dm59h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:01:31 crc kubenswrapper[4877]: I0128 17:01:31.872345 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fad3a099-fcee-4a91-9de7-c67834a9743c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fad3a099-fcee-4a91-9de7-c67834a9743c" (UID: "fad3a099-fcee-4a91-9de7-c67834a9743c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:01:31 crc kubenswrapper[4877]: I0128 17:01:31.945685 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fad3a099-fcee-4a91-9de7-c67834a9743c-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:31 crc kubenswrapper[4877]: I0128 17:01:31.945717 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fad3a099-fcee-4a91-9de7-c67834a9743c-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:31 crc kubenswrapper[4877]: I0128 17:01:31.945729 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dm59h\" (UniqueName: \"kubernetes.io/projected/fad3a099-fcee-4a91-9de7-c67834a9743c-kube-api-access-dm59h\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.129684 4877 generic.go:334] "Generic (PLEG): container finished" podID="fad3a099-fcee-4a91-9de7-c67834a9743c" containerID="b3a91866f1e5bb57d0931f88b7b1e3e600b8d548349a14117d9eef05e850e17f" exitCode=0
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.129753 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vg4fw" event={"ID":"fad3a099-fcee-4a91-9de7-c67834a9743c","Type":"ContainerDied","Data":"b3a91866f1e5bb57d0931f88b7b1e3e600b8d548349a14117d9eef05e850e17f"}
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.129783 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vg4fw" event={"ID":"fad3a099-fcee-4a91-9de7-c67834a9743c","Type":"ContainerDied","Data":"3cca45254379da0873efbe516c75b852b51ca67a6ac343f523f4acdf68142309"}
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.129803 4877 scope.go:117] "RemoveContainer" containerID="b3a91866f1e5bb57d0931f88b7b1e3e600b8d548349a14117d9eef05e850e17f"
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.129956 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vg4fw"
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.136738 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"e5410b8c-cff8-4df4-885f-e550cf3d6dfd","Type":"ContainerStarted","Data":"77a232531aa0f64b0c5ba24ec5857cb6befdaec90238d6461322f46318a28ecb"}
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.173139 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vg4fw"]
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.194467 4877 scope.go:117] "RemoveContainer" containerID="813c41ef5472727efe970f568129f444d72ff37973fc90f645af5ae37011da9a"
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.196752 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vg4fw"]
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.222064 4877 scope.go:117] "RemoveContainer" containerID="23d7cf3fea1a43cd0753c3b17caf36b466ab80a868f25dab496392167bc4b917"
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.262763 4877 scope.go:117] "RemoveContainer" containerID="b3a91866f1e5bb57d0931f88b7b1e3e600b8d548349a14117d9eef05e850e17f"
Jan 28 17:01:32 crc kubenswrapper[4877]: E0128 17:01:32.264242 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3a91866f1e5bb57d0931f88b7b1e3e600b8d548349a14117d9eef05e850e17f\": container with ID starting with b3a91866f1e5bb57d0931f88b7b1e3e600b8d548349a14117d9eef05e850e17f not found: ID does not exist" containerID="b3a91866f1e5bb57d0931f88b7b1e3e600b8d548349a14117d9eef05e850e17f"
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.264288 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3a91866f1e5bb57d0931f88b7b1e3e600b8d548349a14117d9eef05e850e17f"} err="failed to get container status \"b3a91866f1e5bb57d0931f88b7b1e3e600b8d548349a14117d9eef05e850e17f\": rpc error: code = NotFound desc = could not find container \"b3a91866f1e5bb57d0931f88b7b1e3e600b8d548349a14117d9eef05e850e17f\": container with ID starting with b3a91866f1e5bb57d0931f88b7b1e3e600b8d548349a14117d9eef05e850e17f not found: ID does not exist"
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.264341 4877 scope.go:117] "RemoveContainer" containerID="813c41ef5472727efe970f568129f444d72ff37973fc90f645af5ae37011da9a"
Jan 28 17:01:32 crc kubenswrapper[4877]: E0128 17:01:32.264930 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"813c41ef5472727efe970f568129f444d72ff37973fc90f645af5ae37011da9a\": container with ID starting with 813c41ef5472727efe970f568129f444d72ff37973fc90f645af5ae37011da9a not found: ID does not exist" containerID="813c41ef5472727efe970f568129f444d72ff37973fc90f645af5ae37011da9a"
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.264978 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"813c41ef5472727efe970f568129f444d72ff37973fc90f645af5ae37011da9a"} err="failed to get container status \"813c41ef5472727efe970f568129f444d72ff37973fc90f645af5ae37011da9a\": rpc error: code = NotFound desc = could not find container \"813c41ef5472727efe970f568129f444d72ff37973fc90f645af5ae37011da9a\": container with ID starting with 813c41ef5472727efe970f568129f444d72ff37973fc90f645af5ae37011da9a not found: ID does not exist"
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.265006 4877 scope.go:117] "RemoveContainer" containerID="23d7cf3fea1a43cd0753c3b17caf36b466ab80a868f25dab496392167bc4b917"
Jan 28 17:01:32 crc kubenswrapper[4877]: E0128 17:01:32.265707 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23d7cf3fea1a43cd0753c3b17caf36b466ab80a868f25dab496392167bc4b917\": container with ID starting with 23d7cf3fea1a43cd0753c3b17caf36b466ab80a868f25dab496392167bc4b917 not found: ID does not exist" containerID="23d7cf3fea1a43cd0753c3b17caf36b466ab80a868f25dab496392167bc4b917"
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.265756 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23d7cf3fea1a43cd0753c3b17caf36b466ab80a868f25dab496392167bc4b917"} err="failed to get container status \"23d7cf3fea1a43cd0753c3b17caf36b466ab80a868f25dab496392167bc4b917\": rpc error: code = NotFound desc = could not find container \"23d7cf3fea1a43cd0753c3b17caf36b466ab80a868f25dab496392167bc4b917\": container with ID starting with 23d7cf3fea1a43cd0753c3b17caf36b466ab80a868f25dab496392167bc4b917 not found: ID does not exist"
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.576042 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Jan 28 17:01:32 crc kubenswrapper[4877]: I0128 17:01:32.623930 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 28 17:01:33 crc kubenswrapper[4877]: I0128 17:01:33.160813 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b043f173-17ee-445b-a64b-ac750304a5ff" containerName="cinder-scheduler" containerID="cri-o://fff1bfd931e61d21bb174b32bda485ae4952e2a7774b40161375e0c677adb454" gracePeriod=30
Jan 28 17:01:33 crc kubenswrapper[4877]: I0128 17:01:33.160913 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b043f173-17ee-445b-a64b-ac750304a5ff" containerName="probe" containerID="cri-o://0fc1e365f3c31c663d06a833d418b716b9b1ab21f78c52542861b21aab96e9c6" gracePeriod=30
Jan 28 17:01:33 crc kubenswrapper[4877]: I0128 17:01:33.351553 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fad3a099-fcee-4a91-9de7-c67834a9743c" path="/var/lib/kubelet/pods/fad3a099-fcee-4a91-9de7-c67834a9743c/volumes"
Jan 28 17:01:34 crc kubenswrapper[4877]: I0128 17:01:34.175527 4877 generic.go:334] "Generic (PLEG): container finished" podID="b043f173-17ee-445b-a64b-ac750304a5ff" containerID="0fc1e365f3c31c663d06a833d418b716b9b1ab21f78c52542861b21aab96e9c6" exitCode=0
Jan 28 17:01:34 crc kubenswrapper[4877]: I0128 17:01:34.175533 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b043f173-17ee-445b-a64b-ac750304a5ff","Type":"ContainerDied","Data":"0fc1e365f3c31c663d06a833d418b716b9b1ab21f78c52542861b21aab96e9c6"}
Jan 28 17:01:34 crc kubenswrapper[4877]: I0128 17:01:34.178517 4877 generic.go:334] "Generic (PLEG): container finished" podID="dc6b6b48-855c-412b-af8b-be4c27962c4b" containerID="9d6ab7a208b419836251b13cfebbbb207c58e5cda6e8ceadb24190892c535403" exitCode=0
Jan 28 17:01:34 crc kubenswrapper[4877]: I0128 17:01:34.178559 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-4wxn6" event={"ID":"dc6b6b48-855c-412b-af8b-be4c27962c4b","Type":"ContainerDied","Data":"9d6ab7a208b419836251b13cfebbbb207c58e5cda6e8ceadb24190892c535403"}
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.331620 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82"
Jan 28 17:01:35 crc kubenswrapper[4877]: E0128 17:01:35.331968 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.373646 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-86b8867947-jkp8w"]
Jan 28 17:01:35 crc kubenswrapper[4877]: E0128 17:01:35.375125 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fad3a099-fcee-4a91-9de7-c67834a9743c" containerName="extract-content"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.382337 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="fad3a099-fcee-4a91-9de7-c67834a9743c" containerName="extract-content"
Jan 28 17:01:35 crc kubenswrapper[4877]: E0128 17:01:35.382511 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fad3a099-fcee-4a91-9de7-c67834a9743c" containerName="extract-utilities"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.382587 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="fad3a099-fcee-4a91-9de7-c67834a9743c" containerName="extract-utilities"
Jan 28 17:01:35 crc kubenswrapper[4877]: E0128 17:01:35.382708 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fad3a099-fcee-4a91-9de7-c67834a9743c" containerName="registry-server"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.382761 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="fad3a099-fcee-4a91-9de7-c67834a9743c" containerName="registry-server"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.383283 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="fad3a099-fcee-4a91-9de7-c67834a9743c" containerName="registry-server"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.385002 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-86b8867947-jkp8w"]
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.385231 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.394174 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.394387 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.394592 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.448076 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/98234314-b081-449f-b87c-b562dd7eb209-run-httpd\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.448193 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98234314-b081-449f-b87c-b562dd7eb209-config-data\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.448281 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nmhf\" (UniqueName: \"kubernetes.io/projected/98234314-b081-449f-b87c-b562dd7eb209-kube-api-access-9nmhf\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.448323 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/98234314-b081-449f-b87c-b562dd7eb209-public-tls-certs\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.448354 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/98234314-b081-449f-b87c-b562dd7eb209-internal-tls-certs\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.448388 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98234314-b081-449f-b87c-b562dd7eb209-combined-ca-bundle\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.449098 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/98234314-b081-449f-b87c-b562dd7eb209-etc-swift\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.449660 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/98234314-b081-449f-b87c-b562dd7eb209-log-httpd\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.553285 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/98234314-b081-449f-b87c-b562dd7eb209-etc-swift\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.553348 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/98234314-b081-449f-b87c-b562dd7eb209-log-httpd\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.553411 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/98234314-b081-449f-b87c-b562dd7eb209-run-httpd\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.553441 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98234314-b081-449f-b87c-b562dd7eb209-config-data\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.553497 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nmhf\" (UniqueName: \"kubernetes.io/projected/98234314-b081-449f-b87c-b562dd7eb209-kube-api-access-9nmhf\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.553520 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/98234314-b081-449f-b87c-b562dd7eb209-public-tls-certs\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.553544 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/98234314-b081-449f-b87c-b562dd7eb209-internal-tls-certs\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.553574 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98234314-b081-449f-b87c-b562dd7eb209-combined-ca-bundle\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.558307 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/98234314-b081-449f-b87c-b562dd7eb209-log-httpd\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.558969 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/98234314-b081-449f-b87c-b562dd7eb209-run-httpd\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.562669 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/98234314-b081-449f-b87c-b562dd7eb209-public-tls-certs\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.568559 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98234314-b081-449f-b87c-b562dd7eb209-combined-ca-bundle\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.568680 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/98234314-b081-449f-b87c-b562dd7eb209-etc-swift\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.570148 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/98234314-b081-449f-b87c-b562dd7eb209-internal-tls-certs\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.571254 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98234314-b081-449f-b87c-b562dd7eb209-config-data\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.587536 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nmhf\" (UniqueName: \"kubernetes.io/projected/98234314-b081-449f-b87c-b562dd7eb209-kube-api-access-9nmhf\") pod \"swift-proxy-86b8867947-jkp8w\" (UID: \"98234314-b081-449f-b87c-b562dd7eb209\") " pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:35 crc kubenswrapper[4877]: I0128 17:01:35.721682 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-86b8867947-jkp8w"
Jan 28 17:01:36 crc kubenswrapper[4877]: I0128 17:01:36.024278 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Jan 28 17:01:36 crc kubenswrapper[4877]: I0128 17:01:36.206640 4877 generic.go:334] "Generic (PLEG): container finished" podID="b043f173-17ee-445b-a64b-ac750304a5ff" containerID="fff1bfd931e61d21bb174b32bda485ae4952e2a7774b40161375e0c677adb454" exitCode=0
Jan 28 17:01:36 crc kubenswrapper[4877]: I0128 17:01:36.206688 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b043f173-17ee-445b-a64b-ac750304a5ff","Type":"ContainerDied","Data":"fff1bfd931e61d21bb174b32bda485ae4952e2a7774b40161375e0c677adb454"}
Jan 28 17:01:37 crc kubenswrapper[4877]: I0128 17:01:37.803107 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 17:01:37 crc kubenswrapper[4877]: I0128 17:01:37.804529 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="ceilometer-central-agent" containerID="cri-o://5a80b0963b889c7583031bd7eabe06771d3302929136286d0e3ffbd8e46de522" gracePeriod=30
Jan 28 17:01:37 crc kubenswrapper[4877]: I0128 17:01:37.804582 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="proxy-httpd" containerID="cri-o://e1a954fc4dec44449b637dbe069f2a5504ea7a74d9787dbddccf239fcaea4f21" gracePeriod=30
Jan 28 17:01:37 crc kubenswrapper[4877]: I0128 17:01:37.804696 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="ceilometer-notification-agent" containerID="cri-o://a253b8d257bbecd20dab1453c8b4b425474c70e564c97034f52f7ed912c9aadf" gracePeriod=30
Jan 28 17:01:37 crc kubenswrapper[4877]: I0128 17:01:37.804923 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="sg-core" containerID="cri-o://e564c705618b35908169daca5c07bc8002963487b4e0b47ffc0c6e119508d5a2" gracePeriod=30
Jan 28 17:01:37 crc kubenswrapper[4877]: I0128 17:01:37.824392 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.199:3000/\": EOF"
Jan 28 17:01:38 crc kubenswrapper[4877]: I0128 17:01:38.240864 4877 generic.go:334] "Generic (PLEG): container finished" podID="747d3fca-be49-419c-b42f-e746edee5eda" containerID="e1a954fc4dec44449b637dbe069f2a5504ea7a74d9787dbddccf239fcaea4f21" exitCode=0
Jan 28 17:01:38 crc kubenswrapper[4877]: I0128 17:01:38.241204 4877 generic.go:334] "Generic (PLEG): container finished" podID="747d3fca-be49-419c-b42f-e746edee5eda" containerID="e564c705618b35908169daca5c07bc8002963487b4e0b47ffc0c6e119508d5a2" exitCode=2
Jan 28 17:01:38 crc kubenswrapper[4877]: I0128 17:01:38.241218 4877 generic.go:334] "Generic (PLEG): container finished" podID="747d3fca-be49-419c-b42f-e746edee5eda" containerID="5a80b0963b889c7583031bd7eabe06771d3302929136286d0e3ffbd8e46de522" exitCode=0
Jan 28 17:01:38 crc kubenswrapper[4877]: I0128 17:01:38.241248 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"747d3fca-be49-419c-b42f-e746edee5eda","Type":"ContainerDied","Data":"e1a954fc4dec44449b637dbe069f2a5504ea7a74d9787dbddccf239fcaea4f21"}
Jan 28 17:01:38 crc kubenswrapper[4877]: I0128 17:01:38.241396 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"747d3fca-be49-419c-b42f-e746edee5eda","Type":"ContainerDied","Data":"e564c705618b35908169daca5c07bc8002963487b4e0b47ffc0c6e119508d5a2"}
Jan 28 17:01:38 crc kubenswrapper[4877]: I0128 17:01:38.241447 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"747d3fca-be49-419c-b42f-e746edee5eda","Type":"ContainerDied","Data":"5a80b0963b889c7583031bd7eabe06771d3302929136286d0e3ffbd8e46de522"}
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.646386 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.647007 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" containerName="glance-log" containerID="cri-o://bb72620583d5ff8ef94f423a3e46172d9d8580295b24de01813a26858b551360" gracePeriod=30
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.647400 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" containerName="glance-httpd" containerID="cri-o://d4993471e4e07184c25eddffffbc1a8d80e15615dfb77573261798207c61721e" gracePeriod=30
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.694843 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-6c96dc67d-f972b"]
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.696792 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6c96dc67d-f972b"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.701224 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.701547 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.701867 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-tj2bg"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.728362 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-6c96dc67d-f972b"]
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.751239 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-combined-ca-bundle\") pod \"heat-engine-6c96dc67d-f972b\" (UID: \"97c0facc-4ffb-4f83-86aa-68681d7c3661\") " pod="openstack/heat-engine-6c96dc67d-f972b"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.751344 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-config-data-custom\") pod \"heat-engine-6c96dc67d-f972b\" (UID: \"97c0facc-4ffb-4f83-86aa-68681d7c3661\") " pod="openstack/heat-engine-6c96dc67d-f972b"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.751436 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkgqg\" (UniqueName: \"kubernetes.io/projected/97c0facc-4ffb-4f83-86aa-68681d7c3661-kube-api-access-gkgqg\") pod \"heat-engine-6c96dc67d-f972b\" (UID: \"97c0facc-4ffb-4f83-86aa-68681d7c3661\") " pod="openstack/heat-engine-6c96dc67d-f972b"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.751592 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-config-data\") pod \"heat-engine-6c96dc67d-f972b\" (UID: \"97c0facc-4ffb-4f83-86aa-68681d7c3661\") " pod="openstack/heat-engine-6c96dc67d-f972b"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.804037 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78d5585959-hndnh"]
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.806006 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78d5585959-hndnh"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.816853 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78d5585959-hndnh"]
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.851812 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7486d7b6df-vf9q9"]
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.858972 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7486d7b6df-vf9q9"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.865596 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.866033 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4j6s\" (UniqueName: \"kubernetes.io/projected/fc2f618a-c56e-4c06-a365-be3073f2c2ae-kube-api-access-g4j6s\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.866092 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-config-data\") pod \"heat-engine-6c96dc67d-f972b\" (UID: \"97c0facc-4ffb-4f83-86aa-68681d7c3661\") " pod="openstack/heat-engine-6c96dc67d-f972b"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.866382 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-combined-ca-bundle\") pod \"heat-engine-6c96dc67d-f972b\" (UID: \"97c0facc-4ffb-4f83-86aa-68681d7c3661\") " pod="openstack/heat-engine-6c96dc67d-f972b"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.866470 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-config-data-custom\") pod \"heat-engine-6c96dc67d-f972b\" (UID: \"97c0facc-4ffb-4f83-86aa-68681d7c3661\") " pod="openstack/heat-engine-6c96dc67d-f972b"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.866510 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-dns-swift-storage-0\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.866563 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-config\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.866629 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-ovsdbserver-nb\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.866659 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkgqg\" (UniqueName: \"kubernetes.io/projected/97c0facc-4ffb-4f83-86aa-68681d7c3661-kube-api-access-gkgqg\") pod \"heat-engine-6c96dc67d-f972b\" (UID: \"97c0facc-4ffb-4f83-86aa-68681d7c3661\") " pod="openstack/heat-engine-6c96dc67d-f972b"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.866730 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-ovsdbserver-sb\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.866760 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-dns-svc\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.874510 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7486d7b6df-vf9q9"]
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.879505 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-combined-ca-bundle\") pod \"heat-engine-6c96dc67d-f972b\" (UID: \"97c0facc-4ffb-4f83-86aa-68681d7c3661\") " pod="openstack/heat-engine-6c96dc67d-f972b"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.883612 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-config-data\") pod \"heat-engine-6c96dc67d-f972b\" (UID: \"97c0facc-4ffb-4f83-86aa-68681d7c3661\") " pod="openstack/heat-engine-6c96dc67d-f972b"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.917210 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-config-data-custom\") pod \"heat-engine-6c96dc67d-f972b\" (UID: \"97c0facc-4ffb-4f83-86aa-68681d7c3661\") " pod="openstack/heat-engine-6c96dc67d-f972b"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.925467 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkgqg\" (UniqueName: \"kubernetes.io/projected/97c0facc-4ffb-4f83-86aa-68681d7c3661-kube-api-access-gkgqg\") pod \"heat-engine-6c96dc67d-f972b\" (UID: \"97c0facc-4ffb-4f83-86aa-68681d7c3661\") " pod="openstack/heat-engine-6c96dc67d-f972b"
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.926850 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-66b66545b5-ldnrl"]
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.930077 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.938848 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.939035 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-66b66545b5-ldnrl"] Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.972663 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfxzv\" (UniqueName: \"kubernetes.io/projected/8355c6a7-af56-4b68-bd65-560a99273480-kube-api-access-dfxzv\") pod \"heat-cfnapi-7486d7b6df-vf9q9\" (UID: \"8355c6a7-af56-4b68-bd65-560a99273480\") " pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.972717 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-dns-swift-storage-0\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.972745 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-config\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.972775 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-config-data\") pod \"heat-cfnapi-7486d7b6df-vf9q9\" (UID: \"8355c6a7-af56-4b68-bd65-560a99273480\") " pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.972825 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qjhk\" (UniqueName: \"kubernetes.io/projected/db1f5546-0eed-4bf4-bd25-065718c91a46-kube-api-access-5qjhk\") pod \"heat-api-66b66545b5-ldnrl\" (UID: \"db1f5546-0eed-4bf4-bd25-065718c91a46\") " pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.972845 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-ovsdbserver-nb\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.973068 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-ovsdbserver-sb\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.973181 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-dns-svc\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh" 
Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.973279 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4j6s\" (UniqueName: \"kubernetes.io/projected/fc2f618a-c56e-4c06-a365-be3073f2c2ae-kube-api-access-g4j6s\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.973377 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-combined-ca-bundle\") pod \"heat-api-66b66545b5-ldnrl\" (UID: \"db1f5546-0eed-4bf4-bd25-065718c91a46\") " pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.973406 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-combined-ca-bundle\") pod \"heat-cfnapi-7486d7b6df-vf9q9\" (UID: \"8355c6a7-af56-4b68-bd65-560a99273480\") " pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.973437 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-config-data\") pod \"heat-api-66b66545b5-ldnrl\" (UID: \"db1f5546-0eed-4bf4-bd25-065718c91a46\") " pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.973532 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-dns-swift-storage-0\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.973611 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-config-data-custom\") pod \"heat-api-66b66545b5-ldnrl\" (UID: \"db1f5546-0eed-4bf4-bd25-065718c91a46\") " pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.973645 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-config-data-custom\") pod \"heat-cfnapi-7486d7b6df-vf9q9\" (UID: \"8355c6a7-af56-4b68-bd65-560a99273480\") " pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.974059 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-dns-svc\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.974574 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-ovsdbserver-nb\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " 
pod="openstack/dnsmasq-dns-78d5585959-hndnh" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.975138 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-config\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.975617 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-ovsdbserver-sb\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh" Jan 28 17:01:39 crc kubenswrapper[4877]: I0128 17:01:39.996693 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4j6s\" (UniqueName: \"kubernetes.io/projected/fc2f618a-c56e-4c06-a365-be3073f2c2ae-kube-api-access-g4j6s\") pod \"dnsmasq-dns-78d5585959-hndnh\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " pod="openstack/dnsmasq-dns-78d5585959-hndnh" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.037378 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6c96dc67d-f972b" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.075312 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qjhk\" (UniqueName: \"kubernetes.io/projected/db1f5546-0eed-4bf4-bd25-065718c91a46-kube-api-access-5qjhk\") pod \"heat-api-66b66545b5-ldnrl\" (UID: \"db1f5546-0eed-4bf4-bd25-065718c91a46\") " pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.075488 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-combined-ca-bundle\") pod \"heat-api-66b66545b5-ldnrl\" (UID: \"db1f5546-0eed-4bf4-bd25-065718c91a46\") " pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.075512 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-combined-ca-bundle\") pod \"heat-cfnapi-7486d7b6df-vf9q9\" (UID: \"8355c6a7-af56-4b68-bd65-560a99273480\") " pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.075532 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-config-data\") pod \"heat-api-66b66545b5-ldnrl\" (UID: \"db1f5546-0eed-4bf4-bd25-065718c91a46\") " pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.075635 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-config-data-custom\") pod \"heat-api-66b66545b5-ldnrl\" (UID: \"db1f5546-0eed-4bf4-bd25-065718c91a46\") " pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.075659 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-config-data-custom\") pod \"heat-cfnapi-7486d7b6df-vf9q9\" (UID: \"8355c6a7-af56-4b68-bd65-560a99273480\") " pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.085632 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfxzv\" (UniqueName: \"kubernetes.io/projected/8355c6a7-af56-4b68-bd65-560a99273480-kube-api-access-dfxzv\") pod \"heat-cfnapi-7486d7b6df-vf9q9\" (UID: \"8355c6a7-af56-4b68-bd65-560a99273480\") " pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.085750 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-config-data\") pod \"heat-cfnapi-7486d7b6df-vf9q9\" (UID: \"8355c6a7-af56-4b68-bd65-560a99273480\") " pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.090908 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-config-data-custom\") pod \"heat-cfnapi-7486d7b6df-vf9q9\" (UID: \"8355c6a7-af56-4b68-bd65-560a99273480\") " pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.099352 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-combined-ca-bundle\") pod \"heat-api-66b66545b5-ldnrl\" (UID: \"db1f5546-0eed-4bf4-bd25-065718c91a46\") " pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.100969 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qjhk\" (UniqueName: \"kubernetes.io/projected/db1f5546-0eed-4bf4-bd25-065718c91a46-kube-api-access-5qjhk\") pod \"heat-api-66b66545b5-ldnrl\" (UID: \"db1f5546-0eed-4bf4-bd25-065718c91a46\") " pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.101027 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-combined-ca-bundle\") pod \"heat-cfnapi-7486d7b6df-vf9q9\" (UID: \"8355c6a7-af56-4b68-bd65-560a99273480\") " pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.100976 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-config-data\") pod \"heat-api-66b66545b5-ldnrl\" (UID: \"db1f5546-0eed-4bf4-bd25-065718c91a46\") " pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.106077 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-config-data\") pod \"heat-cfnapi-7486d7b6df-vf9q9\" (UID: \"8355c6a7-af56-4b68-bd65-560a99273480\") " pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.114634 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-config-data-custom\") pod 
\"heat-api-66b66545b5-ldnrl\" (UID: \"db1f5546-0eed-4bf4-bd25-065718c91a46\") " pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.116846 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfxzv\" (UniqueName: \"kubernetes.io/projected/8355c6a7-af56-4b68-bd65-560a99273480-kube-api-access-dfxzv\") pod \"heat-cfnapi-7486d7b6df-vf9q9\" (UID: \"8355c6a7-af56-4b68-bd65-560a99273480\") " pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.166284 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78d5585959-hndnh" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.273506 4877 generic.go:334] "Generic (PLEG): container finished" podID="6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" containerID="bb72620583d5ff8ef94f423a3e46172d9d8580295b24de01813a26858b551360" exitCode=143 Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.273552 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7","Type":"ContainerDied","Data":"bb72620583d5ff8ef94f423a3e46172d9d8580295b24de01813a26858b551360"} Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.346652 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:01:40 crc kubenswrapper[4877]: I0128 17:01:40.359525 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:01:41 crc kubenswrapper[4877]: I0128 17:01:41.958968 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-ph4bn"] Jan 28 17:01:41 crc kubenswrapper[4877]: I0128 17:01:41.962520 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-ph4bn" Jan 28 17:01:41 crc kubenswrapper[4877]: I0128 17:01:41.992760 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-ph4bn"] Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.042028 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.199:3000/\": dial tcp 10.217.0.199:3000: connect: connection refused" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.053400 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvpck\" (UniqueName: \"kubernetes.io/projected/764294ed-9715-4d81-b7b6-50a4104630fd-kube-api-access-gvpck\") pod \"nova-api-db-create-ph4bn\" (UID: \"764294ed-9715-4d81-b7b6-50a4104630fd\") " pod="openstack/nova-api-db-create-ph4bn" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.053442 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/764294ed-9715-4d81-b7b6-50a4104630fd-operator-scripts\") pod \"nova-api-db-create-ph4bn\" (UID: \"764294ed-9715-4d81-b7b6-50a4104630fd\") " pod="openstack/nova-api-db-create-ph4bn" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.067988 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-tkw8b"] Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.069545 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-tkw8b" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.112087 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-tkw8b"] Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.156017 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvpck\" (UniqueName: \"kubernetes.io/projected/764294ed-9715-4d81-b7b6-50a4104630fd-kube-api-access-gvpck\") pod \"nova-api-db-create-ph4bn\" (UID: \"764294ed-9715-4d81-b7b6-50a4104630fd\") " pod="openstack/nova-api-db-create-ph4bn" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.156066 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/764294ed-9715-4d81-b7b6-50a4104630fd-operator-scripts\") pod \"nova-api-db-create-ph4bn\" (UID: \"764294ed-9715-4d81-b7b6-50a4104630fd\") " pod="openstack/nova-api-db-create-ph4bn" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.156141 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19c75c24-13dd-439a-9245-c02f9e6d8ec7-operator-scripts\") pod \"nova-cell0-db-create-tkw8b\" (UID: \"19c75c24-13dd-439a-9245-c02f9e6d8ec7\") " pod="openstack/nova-cell0-db-create-tkw8b" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.156183 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmbsp\" (UniqueName: \"kubernetes.io/projected/19c75c24-13dd-439a-9245-c02f9e6d8ec7-kube-api-access-hmbsp\") pod \"nova-cell0-db-create-tkw8b\" (UID: \"19c75c24-13dd-439a-9245-c02f9e6d8ec7\") " pod="openstack/nova-cell0-db-create-tkw8b" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 
17:01:42.157320 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/764294ed-9715-4d81-b7b6-50a4104630fd-operator-scripts\") pod \"nova-api-db-create-ph4bn\" (UID: \"764294ed-9715-4d81-b7b6-50a4104630fd\") " pod="openstack/nova-api-db-create-ph4bn" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.195533 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-c6e0-account-create-update-stjf9"] Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.197039 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c6e0-account-create-update-stjf9" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.214986 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.216428 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvpck\" (UniqueName: \"kubernetes.io/projected/764294ed-9715-4d81-b7b6-50a4104630fd-kube-api-access-gvpck\") pod \"nova-api-db-create-ph4bn\" (UID: \"764294ed-9715-4d81-b7b6-50a4104630fd\") " pod="openstack/nova-api-db-create-ph4bn" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.216915 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-c6e0-account-create-update-stjf9"] Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.249210 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-dfxx8"] Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.250925 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dfxx8" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.257982 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqchl\" (UniqueName: \"kubernetes.io/projected/59e30121-a8ea-4d16-8b90-659a6158def9-kube-api-access-qqchl\") pod \"nova-api-c6e0-account-create-update-stjf9\" (UID: \"59e30121-a8ea-4d16-8b90-659a6158def9\") " pod="openstack/nova-api-c6e0-account-create-update-stjf9" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.258071 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59e30121-a8ea-4d16-8b90-659a6158def9-operator-scripts\") pod \"nova-api-c6e0-account-create-update-stjf9\" (UID: \"59e30121-a8ea-4d16-8b90-659a6158def9\") " pod="openstack/nova-api-c6e0-account-create-update-stjf9" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.258134 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19c75c24-13dd-439a-9245-c02f9e6d8ec7-operator-scripts\") pod \"nova-cell0-db-create-tkw8b\" (UID: \"19c75c24-13dd-439a-9245-c02f9e6d8ec7\") " pod="openstack/nova-cell0-db-create-tkw8b" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.258195 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmbsp\" (UniqueName: \"kubernetes.io/projected/19c75c24-13dd-439a-9245-c02f9e6d8ec7-kube-api-access-hmbsp\") pod \"nova-cell0-db-create-tkw8b\" (UID: \"19c75c24-13dd-439a-9245-c02f9e6d8ec7\") " pod="openstack/nova-cell0-db-create-tkw8b" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.259468 4877 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19c75c24-13dd-439a-9245-c02f9e6d8ec7-operator-scripts\") pod \"nova-cell0-db-create-tkw8b\" (UID: \"19c75c24-13dd-439a-9245-c02f9e6d8ec7\") " pod="openstack/nova-cell0-db-create-tkw8b" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.285040 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-dfxx8"] Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.286700 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-ph4bn" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.290845 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmbsp\" (UniqueName: \"kubernetes.io/projected/19c75c24-13dd-439a-9245-c02f9e6d8ec7-kube-api-access-hmbsp\") pod \"nova-cell0-db-create-tkw8b\" (UID: \"19c75c24-13dd-439a-9245-c02f9e6d8ec7\") " pod="openstack/nova-cell0-db-create-tkw8b" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.360032 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqchl\" (UniqueName: \"kubernetes.io/projected/59e30121-a8ea-4d16-8b90-659a6158def9-kube-api-access-qqchl\") pod \"nova-api-c6e0-account-create-update-stjf9\" (UID: \"59e30121-a8ea-4d16-8b90-659a6158def9\") " pod="openstack/nova-api-c6e0-account-create-update-stjf9" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.360096 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45lv7\" (UniqueName: \"kubernetes.io/projected/7749a4b7-d909-4b9a-ae04-acfcf29f916c-kube-api-access-45lv7\") pod \"nova-cell1-db-create-dfxx8\" (UID: \"7749a4b7-d909-4b9a-ae04-acfcf29f916c\") " pod="openstack/nova-cell1-db-create-dfxx8" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.360146 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59e30121-a8ea-4d16-8b90-659a6158def9-operator-scripts\") pod \"nova-api-c6e0-account-create-update-stjf9\" (UID: \"59e30121-a8ea-4d16-8b90-659a6158def9\") " pod="openstack/nova-api-c6e0-account-create-update-stjf9" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.360219 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7749a4b7-d909-4b9a-ae04-acfcf29f916c-operator-scripts\") pod \"nova-cell1-db-create-dfxx8\" (UID: \"7749a4b7-d909-4b9a-ae04-acfcf29f916c\") " pod="openstack/nova-cell1-db-create-dfxx8" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.363801 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59e30121-a8ea-4d16-8b90-659a6158def9-operator-scripts\") pod \"nova-api-c6e0-account-create-update-stjf9\" (UID: \"59e30121-a8ea-4d16-8b90-659a6158def9\") " pod="openstack/nova-api-c6e0-account-create-update-stjf9" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.374106 4877 generic.go:334] "Generic (PLEG): container finished" podID="747d3fca-be49-419c-b42f-e746edee5eda" containerID="a253b8d257bbecd20dab1453c8b4b425474c70e564c97034f52f7ed912c9aadf" exitCode=0 Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.374158 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"747d3fca-be49-419c-b42f-e746edee5eda","Type":"ContainerDied","Data":"a253b8d257bbecd20dab1453c8b4b425474c70e564c97034f52f7ed912c9aadf"} Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.390643 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-71e2-account-create-update-xnpjr"] Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.392536 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-71e2-account-create-update-xnpjr" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.394558 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqchl\" (UniqueName: \"kubernetes.io/projected/59e30121-a8ea-4d16-8b90-659a6158def9-kube-api-access-qqchl\") pod \"nova-api-c6e0-account-create-update-stjf9\" (UID: \"59e30121-a8ea-4d16-8b90-659a6158def9\") " pod="openstack/nova-api-c6e0-account-create-update-stjf9" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.405794 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.416644 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-tkw8b" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.432547 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-71e2-account-create-update-xnpjr"] Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.461995 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7749a4b7-d909-4b9a-ae04-acfcf29f916c-operator-scripts\") pod \"nova-cell1-db-create-dfxx8\" (UID: \"7749a4b7-d909-4b9a-ae04-acfcf29f916c\") " pod="openstack/nova-cell1-db-create-dfxx8" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.462125 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5-operator-scripts\") pod \"nova-cell0-71e2-account-create-update-xnpjr\" (UID: \"b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5\") " pod="openstack/nova-cell0-71e2-account-create-update-xnpjr" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.462217 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-758j6\" (UniqueName: \"kubernetes.io/projected/b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5-kube-api-access-758j6\") pod \"nova-cell0-71e2-account-create-update-xnpjr\" (UID: \"b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5\") " pod="openstack/nova-cell0-71e2-account-create-update-xnpjr" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.462254 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45lv7\" (UniqueName: \"kubernetes.io/projected/7749a4b7-d909-4b9a-ae04-acfcf29f916c-kube-api-access-45lv7\") pod \"nova-cell1-db-create-dfxx8\" (UID: \"7749a4b7-d909-4b9a-ae04-acfcf29f916c\") " pod="openstack/nova-cell1-db-create-dfxx8" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.464555 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7749a4b7-d909-4b9a-ae04-acfcf29f916c-operator-scripts\") pod \"nova-cell1-db-create-dfxx8\" (UID: \"7749a4b7-d909-4b9a-ae04-acfcf29f916c\") " pod="openstack/nova-cell1-db-create-dfxx8" Jan 28 17:01:42 crc 
kubenswrapper[4877]: I0128 17:01:42.495165 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45lv7\" (UniqueName: \"kubernetes.io/projected/7749a4b7-d909-4b9a-ae04-acfcf29f916c-kube-api-access-45lv7\") pod \"nova-cell1-db-create-dfxx8\" (UID: \"7749a4b7-d909-4b9a-ae04-acfcf29f916c\") " pod="openstack/nova-cell1-db-create-dfxx8" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.571914 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5-operator-scripts\") pod \"nova-cell0-71e2-account-create-update-xnpjr\" (UID: \"b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5\") " pod="openstack/nova-cell0-71e2-account-create-update-xnpjr" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.572216 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-758j6\" (UniqueName: \"kubernetes.io/projected/b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5-kube-api-access-758j6\") pod \"nova-cell0-71e2-account-create-update-xnpjr\" (UID: \"b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5\") " pod="openstack/nova-cell0-71e2-account-create-update-xnpjr" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.573324 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5-operator-scripts\") pod \"nova-cell0-71e2-account-create-update-xnpjr\" (UID: \"b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5\") " pod="openstack/nova-cell0-71e2-account-create-update-xnpjr" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.596573 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c6e0-account-create-update-stjf9" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.611931 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-758j6\" (UniqueName: \"kubernetes.io/projected/b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5-kube-api-access-758j6\") pod \"nova-cell0-71e2-account-create-update-xnpjr\" (UID: \"b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5\") " pod="openstack/nova-cell0-71e2-account-create-update-xnpjr" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.617651 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-417f-account-create-update-d9sds"] Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.627964 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-417f-account-create-update-d9sds" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.631350 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.671283 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-417f-account-create-update-d9sds"] Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.675688 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dfxx8" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.777204 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-71e2-account-create-update-xnpjr" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.778704 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gjfn\" (UniqueName: \"kubernetes.io/projected/11c0eeb0-1466-4faf-ae33-e74028802131-kube-api-access-5gjfn\") pod \"nova-cell1-417f-account-create-update-d9sds\" (UID: \"11c0eeb0-1466-4faf-ae33-e74028802131\") " pod="openstack/nova-cell1-417f-account-create-update-d9sds" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.778912 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11c0eeb0-1466-4faf-ae33-e74028802131-operator-scripts\") pod \"nova-cell1-417f-account-create-update-d9sds\" (UID: \"11c0eeb0-1466-4faf-ae33-e74028802131\") " pod="openstack/nova-cell1-417f-account-create-update-d9sds" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.880952 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gjfn\" (UniqueName: \"kubernetes.io/projected/11c0eeb0-1466-4faf-ae33-e74028802131-kube-api-access-5gjfn\") pod \"nova-cell1-417f-account-create-update-d9sds\" (UID: \"11c0eeb0-1466-4faf-ae33-e74028802131\") " pod="openstack/nova-cell1-417f-account-create-update-d9sds" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.881064 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11c0eeb0-1466-4faf-ae33-e74028802131-operator-scripts\") pod \"nova-cell1-417f-account-create-update-d9sds\" (UID: \"11c0eeb0-1466-4faf-ae33-e74028802131\") " pod="openstack/nova-cell1-417f-account-create-update-d9sds" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.881828 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11c0eeb0-1466-4faf-ae33-e74028802131-operator-scripts\") pod \"nova-cell1-417f-account-create-update-d9sds\" (UID: \"11c0eeb0-1466-4faf-ae33-e74028802131\") " pod="openstack/nova-cell1-417f-account-create-update-d9sds" Jan 28 17:01:42 crc kubenswrapper[4877]: I0128 17:01:42.897990 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gjfn\" (UniqueName: \"kubernetes.io/projected/11c0eeb0-1466-4faf-ae33-e74028802131-kube-api-access-5gjfn\") pod \"nova-cell1-417f-account-create-update-d9sds\" (UID: \"11c0eeb0-1466-4faf-ae33-e74028802131\") " pod="openstack/nova-cell1-417f-account-create-update-d9sds" Jan 28 17:01:43 crc kubenswrapper[4877]: I0128 17:01:43.003094 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-417f-account-create-update-d9sds" Jan 28 17:01:43 crc kubenswrapper[4877]: I0128 17:01:43.225911 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.198:9292/healthcheck\": read tcp 10.217.0.2:43986->10.217.0.198:9292: read: connection reset by peer" Jan 28 17:01:43 crc kubenswrapper[4877]: I0128 17:01:43.225963 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.198:9292/healthcheck\": read tcp 10.217.0.2:43982->10.217.0.198:9292: read: connection reset by peer" Jan 28 17:01:43 crc kubenswrapper[4877]: I0128 17:01:43.394047 4877 generic.go:334] "Generic (PLEG): container finished" podID="6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" containerID="d4993471e4e07184c25eddffffbc1a8d80e15615dfb77573261798207c61721e" exitCode=0 Jan 28 17:01:43 crc kubenswrapper[4877]: I0128 17:01:43.394215 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7","Type":"ContainerDied","Data":"d4993471e4e07184c25eddffffbc1a8d80e15615dfb77573261798207c61721e"} Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.112600 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-4wxn6" Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.212546 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7254g\" (UniqueName: \"kubernetes.io/projected/dc6b6b48-855c-412b-af8b-be4c27962c4b-kube-api-access-7254g\") pod \"dc6b6b48-855c-412b-af8b-be4c27962c4b\" (UID: \"dc6b6b48-855c-412b-af8b-be4c27962c4b\") " Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.212934 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc6b6b48-855c-412b-af8b-be4c27962c4b-combined-ca-bundle\") pod \"dc6b6b48-855c-412b-af8b-be4c27962c4b\" (UID: \"dc6b6b48-855c-412b-af8b-be4c27962c4b\") " Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.213141 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/dc6b6b48-855c-412b-af8b-be4c27962c4b-config\") pod \"dc6b6b48-855c-412b-af8b-be4c27962c4b\" (UID: \"dc6b6b48-855c-412b-af8b-be4c27962c4b\") " Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.216376 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc6b6b48-855c-412b-af8b-be4c27962c4b-kube-api-access-7254g" (OuterVolumeSpecName: "kube-api-access-7254g") pod "dc6b6b48-855c-412b-af8b-be4c27962c4b" (UID: "dc6b6b48-855c-412b-af8b-be4c27962c4b"). InnerVolumeSpecName "kube-api-access-7254g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.282705 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc6b6b48-855c-412b-af8b-be4c27962c4b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dc6b6b48-855c-412b-af8b-be4c27962c4b" (UID: "dc6b6b48-855c-412b-af8b-be4c27962c4b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.301422 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc6b6b48-855c-412b-af8b-be4c27962c4b-config" (OuterVolumeSpecName: "config") pod "dc6b6b48-855c-412b-af8b-be4c27962c4b" (UID: "dc6b6b48-855c-412b-af8b-be4c27962c4b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.316298 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc6b6b48-855c-412b-af8b-be4c27962c4b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.316337 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/dc6b6b48-855c-412b-af8b-be4c27962c4b-config\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.316352 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7254g\" (UniqueName: \"kubernetes.io/projected/dc6b6b48-855c-412b-af8b-be4c27962c4b-kube-api-access-7254g\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.439526 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-4wxn6" event={"ID":"dc6b6b48-855c-412b-af8b-be4c27962c4b","Type":"ContainerDied","Data":"7a22b6ef02d7c08dd5eb99ede0d92690c0f15eca6645efa469f7374f3339dcb6"} Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.439561 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a22b6ef02d7c08dd5eb99ede0d92690c0f15eca6645efa469f7374f3339dcb6" Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.439623 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-4wxn6" Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.846808 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.941883 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-config-data\") pod \"b043f173-17ee-445b-a64b-ac750304a5ff\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.942014 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-scripts\") pod \"b043f173-17ee-445b-a64b-ac750304a5ff\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.942052 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtnq9\" (UniqueName: \"kubernetes.io/projected/b043f173-17ee-445b-a64b-ac750304a5ff-kube-api-access-xtnq9\") pod \"b043f173-17ee-445b-a64b-ac750304a5ff\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.942150 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b043f173-17ee-445b-a64b-ac750304a5ff-etc-machine-id\") pod \"b043f173-17ee-445b-a64b-ac750304a5ff\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.942169 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-config-data-custom\") pod \"b043f173-17ee-445b-a64b-ac750304a5ff\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.942217 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-combined-ca-bundle\") pod \"b043f173-17ee-445b-a64b-ac750304a5ff\" (UID: \"b043f173-17ee-445b-a64b-ac750304a5ff\") " Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.942635 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b043f173-17ee-445b-a64b-ac750304a5ff-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b043f173-17ee-445b-a64b-ac750304a5ff" (UID: "b043f173-17ee-445b-a64b-ac750304a5ff"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.949555 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b043f173-17ee-445b-a64b-ac750304a5ff" (UID: "b043f173-17ee-445b-a64b-ac750304a5ff"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.949709 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-scripts" (OuterVolumeSpecName: "scripts") pod "b043f173-17ee-445b-a64b-ac750304a5ff" (UID: "b043f173-17ee-445b-a64b-ac750304a5ff"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:44 crc kubenswrapper[4877]: I0128 17:01:44.955856 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b043f173-17ee-445b-a64b-ac750304a5ff-kube-api-access-xtnq9" (OuterVolumeSpecName: "kube-api-access-xtnq9") pod "b043f173-17ee-445b-a64b-ac750304a5ff" (UID: "b043f173-17ee-445b-a64b-ac750304a5ff"). InnerVolumeSpecName "kube-api-access-xtnq9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.045643 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.045680 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtnq9\" (UniqueName: \"kubernetes.io/projected/b043f173-17ee-445b-a64b-ac750304a5ff-kube-api-access-xtnq9\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.045691 4877 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b043f173-17ee-445b-a64b-ac750304a5ff-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.045700 4877 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.079353 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b043f173-17ee-445b-a64b-ac750304a5ff" (UID: "b043f173-17ee-445b-a64b-ac750304a5ff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.148348 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.170059 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-config-data" (OuterVolumeSpecName: "config-data") pod "b043f173-17ee-445b-a64b-ac750304a5ff" (UID: "b043f173-17ee-445b-a64b-ac750304a5ff"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.262284 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b043f173-17ee-445b-a64b-ac750304a5ff-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.450432 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78d5585959-hndnh"] Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.459227 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"e5410b8c-cff8-4df4-885f-e550cf3d6dfd","Type":"ContainerStarted","Data":"bf01e5de3d292fc1a8b95ff99ef63a8b23f5257f9776561a882557c3f0622675"} Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.467421 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b043f173-17ee-445b-a64b-ac750304a5ff","Type":"ContainerDied","Data":"85a8d185293306515436187d8586e61c25c0a813a21e5be2835cc48995f2f6c0"} Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.467575 4877 scope.go:117] "RemoveContainer" containerID="0fc1e365f3c31c663d06a833d418b716b9b1ab21f78c52542861b21aab96e9c6" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.467718 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.504248 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7574dd45d-jbx2q"] Jan 28 17:01:45 crc kubenswrapper[4877]: E0128 17:01:45.504962 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b043f173-17ee-445b-a64b-ac750304a5ff" containerName="probe" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.504978 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="b043f173-17ee-445b-a64b-ac750304a5ff" containerName="probe" Jan 28 17:01:45 crc kubenswrapper[4877]: E0128 17:01:45.504996 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc6b6b48-855c-412b-af8b-be4c27962c4b" containerName="neutron-db-sync" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.505011 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc6b6b48-855c-412b-af8b-be4c27962c4b" containerName="neutron-db-sync" Jan 28 17:01:45 crc kubenswrapper[4877]: E0128 17:01:45.505037 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b043f173-17ee-445b-a64b-ac750304a5ff" containerName="cinder-scheduler" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.505044 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="b043f173-17ee-445b-a64b-ac750304a5ff" containerName="cinder-scheduler" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.505263 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="b043f173-17ee-445b-a64b-ac750304a5ff" containerName="probe" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.505279 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="b043f173-17ee-445b-a64b-ac750304a5ff" containerName="cinder-scheduler" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.505318 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc6b6b48-855c-412b-af8b-be4c27962c4b" containerName="neutron-db-sync" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.506618 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7574dd45d-jbx2q" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.510205 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.512653 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.512947 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-bcjxh" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.523247 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.524286 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.643982417 podStartE2EDuration="15.524264484s" podCreationTimestamp="2026-01-28 17:01:30 +0000 UTC" firstStartedPulling="2026-01-28 17:01:31.327368809 +0000 UTC m=+1594.885695697" lastFinishedPulling="2026-01-28 17:01:44.207650876 +0000 UTC m=+1607.765977764" observedRunningTime="2026-01-28 17:01:45.478781473 +0000 UTC m=+1609.037108361" watchObservedRunningTime="2026-01-28 17:01:45.524264484 +0000 UTC m=+1609.082591372" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.529540 4877 scope.go:117] "RemoveContainer" containerID="fff1bfd931e61d21bb174b32bda485ae4952e2a7774b40161375e0c677adb454" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.572550 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-ovndb-tls-certs\") pod \"neutron-7574dd45d-jbx2q\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " pod="openstack/neutron-7574dd45d-jbx2q" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.572838 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-config\") pod \"neutron-7574dd45d-jbx2q\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " pod="openstack/neutron-7574dd45d-jbx2q" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.572918 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-httpd-config\") pod \"neutron-7574dd45d-jbx2q\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " pod="openstack/neutron-7574dd45d-jbx2q" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.572995 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlldb\" (UniqueName: \"kubernetes.io/projected/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-kube-api-access-wlldb\") pod \"neutron-7574dd45d-jbx2q\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " pod="openstack/neutron-7574dd45d-jbx2q" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.573214 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-combined-ca-bundle\") pod \"neutron-7574dd45d-jbx2q\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " pod="openstack/neutron-7574dd45d-jbx2q" Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.614550 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7574dd45d-jbx2q"]
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.637879 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-xngcd"]
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.640416 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.663664 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.677924 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.678056 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.678155 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.678239 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-combined-ca-bundle\") pod \"neutron-7574dd45d-jbx2q\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " pod="openstack/neutron-7574dd45d-jbx2q"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.678364 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-ovndb-tls-certs\") pod \"neutron-7574dd45d-jbx2q\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " pod="openstack/neutron-7574dd45d-jbx2q"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.678605 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.678685 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-config\") pod \"neutron-7574dd45d-jbx2q\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " pod="openstack/neutron-7574dd45d-jbx2q"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.678798 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-httpd-config\") pod \"neutron-7574dd45d-jbx2q\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " pod="openstack/neutron-7574dd45d-jbx2q"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.678863 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlldb\" (UniqueName: \"kubernetes.io/projected/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-kube-api-access-wlldb\") pod \"neutron-7574dd45d-jbx2q\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " pod="openstack/neutron-7574dd45d-jbx2q"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.678896 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6wds\" (UniqueName: \"kubernetes.io/projected/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-kube-api-access-c6wds\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.678933 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-config\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.705699 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-ovndb-tls-certs\") pod \"neutron-7574dd45d-jbx2q\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " pod="openstack/neutron-7574dd45d-jbx2q"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.709794 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-combined-ca-bundle\") pod \"neutron-7574dd45d-jbx2q\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " pod="openstack/neutron-7574dd45d-jbx2q"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.713531 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-config\") pod \"neutron-7574dd45d-jbx2q\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " pod="openstack/neutron-7574dd45d-jbx2q"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.720291 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-httpd-config\") pod \"neutron-7574dd45d-jbx2q\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " pod="openstack/neutron-7574dd45d-jbx2q"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.739222 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlldb\" (UniqueName: \"kubernetes.io/projected/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-kube-api-access-wlldb\") pod \"neutron-7574dd45d-jbx2q\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " pod="openstack/neutron-7574dd45d-jbx2q"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.784045 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6wds\" (UniqueName: \"kubernetes.io/projected/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-kube-api-access-c6wds\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.784320 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-config\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.784564 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.784712 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.790023 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.787569 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.792539 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.797397 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.810564 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.815777 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-config\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.819247 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.828385 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.841963 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6wds\" (UniqueName: \"kubernetes.io/projected/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-kube-api-access-c6wds\") pod \"dnsmasq-dns-f6bc4c6c9-xngcd\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.847763 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7574dd45d-jbx2q"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.946675 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.947111 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.947290 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 28 17:01:45 crc kubenswrapper[4877]: E0128 17:01:45.947825 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="ceilometer-central-agent"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.947856 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="ceilometer-central-agent"
Jan 28 17:01:45 crc kubenswrapper[4877]: E0128 17:01:45.947872 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" containerName="glance-httpd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.947879 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" containerName="glance-httpd"
Jan 28 17:01:45 crc kubenswrapper[4877]: E0128 17:01:45.947900 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="ceilometer-notification-agent"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.947907 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="ceilometer-notification-agent"
Jan 28 17:01:45 crc kubenswrapper[4877]: E0128 17:01:45.947923 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="sg-core"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.947929 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="sg-core"
Jan 28 17:01:45 crc kubenswrapper[4877]: E0128 17:01:45.947949 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="proxy-httpd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.947955 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="proxy-httpd"
Jan 28 17:01:45 crc kubenswrapper[4877]: E0128 17:01:45.947964 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" containerName="glance-log"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.947970 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" containerName="glance-log"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.948171 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="proxy-httpd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.948181 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" containerName="glance-httpd"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.948196 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="ceilometer-notification-agent"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.948206 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" containerName="glance-log"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.948218 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="ceilometer-central-agent"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.948231 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="747d3fca-be49-419c-b42f-e746edee5eda" containerName="sg-core"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.949604 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.952125 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.971785 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-xngcd"]
Jan 28 17:01:45 crc kubenswrapper[4877]: I0128 17:01:45.994874 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.035916 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.133288 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-logs\") pod \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") "
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.133582 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-combined-ca-bundle\") pod \"747d3fca-be49-419c-b42f-e746edee5eda\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") "
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.133631 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-config-data\") pod \"747d3fca-be49-419c-b42f-e746edee5eda\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") "
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.133650 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/747d3fca-be49-419c-b42f-e746edee5eda-log-httpd\") pod \"747d3fca-be49-419c-b42f-e746edee5eda\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") "
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.133709 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-config-data\") pod \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") "
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.133730 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-combined-ca-bundle\") pod \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") "
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.133816 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-scripts\") pod \"747d3fca-be49-419c-b42f-e746edee5eda\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") "
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.133872 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-internal-tls-certs\") pod \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") "
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.133894 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-sg-core-conf-yaml\") pod \"747d3fca-be49-419c-b42f-e746edee5eda\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") "
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.133945 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vmg9\" (UniqueName: \"kubernetes.io/projected/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-kube-api-access-2vmg9\") pod \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") "
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.134044 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/747d3fca-be49-419c-b42f-e746edee5eda-run-httpd\") pod \"747d3fca-be49-419c-b42f-e746edee5eda\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") "
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.134209 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") pod \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") "
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.134227 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-httpd-run\") pod \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") "
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.134282 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-scripts\") pod \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\" (UID: \"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7\") "
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.134311 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhp8f\" (UniqueName: \"kubernetes.io/projected/747d3fca-be49-419c-b42f-e746edee5eda-kube-api-access-mhp8f\") pod \"747d3fca-be49-419c-b42f-e746edee5eda\" (UID: \"747d3fca-be49-419c-b42f-e746edee5eda\") "
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.134606 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/327e7593-5623-475c-ad8f-2456a437a645-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.134641 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/327e7593-5623-475c-ad8f-2456a437a645-scripts\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.134690 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/327e7593-5623-475c-ad8f-2456a437a645-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.134745 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/327e7593-5623-475c-ad8f-2456a437a645-config-data\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.134784 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpwmn\" (UniqueName: \"kubernetes.io/projected/327e7593-5623-475c-ad8f-2456a437a645-kube-api-access-gpwmn\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.134910 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/327e7593-5623-475c-ad8f-2456a437a645-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.134961 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/747d3fca-be49-419c-b42f-e746edee5eda-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "747d3fca-be49-419c-b42f-e746edee5eda" (UID: "747d3fca-be49-419c-b42f-e746edee5eda"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.136156 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-logs" (OuterVolumeSpecName: "logs") pod "6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" (UID: "6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.139066 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" (UID: "6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.140082 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/747d3fca-be49-419c-b42f-e746edee5eda-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "747d3fca-be49-419c-b42f-e746edee5eda" (UID: "747d3fca-be49-419c-b42f-e746edee5eda"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.141601 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-scripts" (OuterVolumeSpecName: "scripts") pod "747d3fca-be49-419c-b42f-e746edee5eda" (UID: "747d3fca-be49-419c-b42f-e746edee5eda"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.145886 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-scripts" (OuterVolumeSpecName: "scripts") pod "6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" (UID: "6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.150678 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-kube-api-access-2vmg9" (OuterVolumeSpecName: "kube-api-access-2vmg9") pod "6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" (UID: "6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7"). InnerVolumeSpecName "kube-api-access-2vmg9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.153730 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/747d3fca-be49-419c-b42f-e746edee5eda-kube-api-access-mhp8f" (OuterVolumeSpecName: "kube-api-access-mhp8f") pod "747d3fca-be49-419c-b42f-e746edee5eda" (UID: "747d3fca-be49-419c-b42f-e746edee5eda"). InnerVolumeSpecName "kube-api-access-mhp8f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.177264 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409" (OuterVolumeSpecName: "glance") pod "6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" (UID: "6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7"). InnerVolumeSpecName "pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.254042 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" (UID: "6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.255881 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/327e7593-5623-475c-ad8f-2456a437a645-scripts\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.255991 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/327e7593-5623-475c-ad8f-2456a437a645-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.256087 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/327e7593-5623-475c-ad8f-2456a437a645-config-data\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.256154 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpwmn\" (UniqueName: \"kubernetes.io/projected/327e7593-5623-475c-ad8f-2456a437a645-kube-api-access-gpwmn\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.256391 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/327e7593-5623-475c-ad8f-2456a437a645-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.256655 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/327e7593-5623-475c-ad8f-2456a437a645-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.256746 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.258834 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhp8f\" (UniqueName: \"kubernetes.io/projected/747d3fca-be49-419c-b42f-e746edee5eda-kube-api-access-mhp8f\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.258880 4877 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-logs\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.258894 4877 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/747d3fca-be49-419c-b42f-e746edee5eda-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.258908 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.258932 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.258951 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vmg9\" (UniqueName: \"kubernetes.io/projected/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-kube-api-access-2vmg9\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.258977 4877 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/747d3fca-be49-419c-b42f-e746edee5eda-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.259014 4877 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") on node \"crc\" "
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.259027 4877 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.261103 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/327e7593-5623-475c-ad8f-2456a437a645-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.285527 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/327e7593-5623-475c-ad8f-2456a437a645-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.287837 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "747d3fca-be49-419c-b42f-e746edee5eda" (UID: "747d3fca-be49-419c-b42f-e746edee5eda"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.293068 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/327e7593-5623-475c-ad8f-2456a437a645-scripts\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.296457 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/327e7593-5623-475c-ad8f-2456a437a645-config-data\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.297757 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpwmn\" (UniqueName: \"kubernetes.io/projected/327e7593-5623-475c-ad8f-2456a437a645-kube-api-access-gpwmn\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.300768 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-config-data" (OuterVolumeSpecName: "config-data") pod "6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" (UID: "6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.301323 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/327e7593-5623-475c-ad8f-2456a437a645-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"327e7593-5623-475c-ad8f-2456a437a645\") " pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.310366 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.353954 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" (UID: "6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.374904 4877 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.385627 4877 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.385744 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.406873 4877 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.407352 4877 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409") on node "crc"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.454678 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-config-data" (OuterVolumeSpecName: "config-data") pod "747d3fca-be49-419c-b42f-e746edee5eda" (UID: "747d3fca-be49-419c-b42f-e746edee5eda"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.500387 4877 reconciler_common.go:293] "Volume detached for volume \"pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.500426 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.530700 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"747d3fca-be49-419c-b42f-e746edee5eda","Type":"ContainerDied","Data":"bd6de29d42b2bdd02e0dbb00c2e90f2c51d6a842ceeb10cc62231cda83e64322"}
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.530755 4877 scope.go:117] "RemoveContainer" containerID="e1a954fc4dec44449b637dbe069f2a5504ea7a74d9787dbddccf239fcaea4f21"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.530930 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.531142 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "747d3fca-be49-419c-b42f-e746edee5eda" (UID: "747d3fca-be49-419c-b42f-e746edee5eda"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.538299 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.538980 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7","Type":"ContainerDied","Data":"2c34a214086dc05da67ff2aa1a4ccb116b31f3ce56731382affa08860fa0888a"}
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.593575 4877 scope.go:117] "RemoveContainer" containerID="e564c705618b35908169daca5c07bc8002963487b4e0b47ffc0c6e119508d5a2"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.611831 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/747d3fca-be49-419c-b42f-e746edee5eda-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.661685 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.686275 4877 scope.go:117] "RemoveContainer" containerID="a253b8d257bbecd20dab1453c8b4b425474c70e564c97034f52f7ed912c9aadf"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.696033 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.716836 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.733376 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.739980 4877 scope.go:117] "RemoveContainer" containerID="5a80b0963b889c7583031bd7eabe06771d3302929136286d0e3ffbd8e46de522"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.747983 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.750797 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.756912 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.760109 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.764884 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.782631 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.785285 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.791172 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.791688 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.794973 4877 scope.go:117] "RemoveContainer" containerID="d4993471e4e07184c25eddffffbc1a8d80e15615dfb77573261798207c61721e"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.815952 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.817535 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-run-httpd\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.817622 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-log-httpd\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.817690 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.817817 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.818029 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-scripts\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.818051 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-config-data\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.818160 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mj8w2\" (UniqueName: \"kubernetes.io/projected/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-kube-api-access-mj8w2\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.848650 4877 scope.go:117] "RemoveContainer" containerID="bb72620583d5ff8ef94f423a3e46172d9d8580295b24de01813a26858b551360"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.924012 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-log-httpd\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.924142 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.924193 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.924267 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3351bc4c-503c-4f4c-8a88-af46d5102724-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.924313 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3351bc4c-503c-4f4c-8a88-af46d5102724-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.924375 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-scripts\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.924398 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-config-data\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.924430 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3351bc4c-503c-4f4c-8a88-af46d5102724-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.924492 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3351bc4c-503c-4f4c-8a88-af46d5102724-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.924528 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mj8w2\" (UniqueName: \"kubernetes.io/projected/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-kube-api-access-mj8w2\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.924550 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3351bc4c-503c-4f4c-8a88-af46d5102724-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.924618 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.924650 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrj7g\" (UniqueName: \"kubernetes.io/projected/3351bc4c-503c-4f4c-8a88-af46d5102724-kube-api-access-rrj7g\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.924784 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3351bc4c-503c-4f4c-8a88-af46d5102724-logs\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.924873 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-run-httpd\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.925466 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-run-httpd\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.925742 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-log-httpd\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.931642 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.931910 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.936568 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-config-data\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.936633 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-scripts\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:46 crc kubenswrapper[4877]: I0128 17:01:46.970315 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mj8w2\" (UniqueName: \"kubernetes.io/projected/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-kube-api-access-mj8w2\") pod \"ceilometer-0\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " pod="openstack/ceilometer-0"
Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.028718 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3351bc4c-503c-4f4c-8a88-af46d5102724-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.028794 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3351bc4c-503c-4f4c-8a88-af46d5102724-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.028882 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3351bc4c-503c-4f4c-8a88-af46d5102724-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.028933 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3351bc4c-503c-4f4c-8a88-af46d5102724-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.028964 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3351bc4c-503c-4f4c-8a88-af46d5102724-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.029036 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.029068 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrj7g\" (UniqueName: \"kubernetes.io/projected/3351bc4c-503c-4f4c-8a88-af46d5102724-kube-api-access-rrj7g\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.029209 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3351bc4c-503c-4f4c-8a88-af46d5102724-logs\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.029639 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3351bc4c-503c-4f4c-8a88-af46d5102724-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.032023 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3351bc4c-503c-4f4c-8a88-af46d5102724-logs\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.034029 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3351bc4c-503c-4f4c-8a88-af46d5102724-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.034768 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3351bc4c-503c-4f4c-8a88-af46d5102724-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0"
Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.034815 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.034838 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/c970fd9b051e1e4708c5b978fbab2178e7872a320f8059867f5cd332a890e640/globalmount\"" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.035521 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3351bc4c-503c-4f4c-8a88-af46d5102724-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.035904 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3351bc4c-503c-4f4c-8a88-af46d5102724-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.045001 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrj7g\" (UniqueName: \"kubernetes.io/projected/3351bc4c-503c-4f4c-8a88-af46d5102724-kube-api-access-rrj7g\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.105936 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b122ff1c-96b7-4d71-9eee-a51c634fe409\") pod \"glance-default-internal-api-0\" (UID: \"3351bc4c-503c-4f4c-8a88-af46d5102724\") " pod="openstack/glance-default-internal-api-0" Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.139935 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.203610 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7486d7b6df-vf9q9"] Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.218283 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-ph4bn"] Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.243665 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-tkw8b"] Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.267683 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78d5585959-hndnh"] Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.305697 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-6c96dc67d-f972b"] Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.401046 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:47 crc kubenswrapper[4877]: W0128 17:01:47.427535 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7749a4b7_d909_4b9a_ae04_acfcf29f916c.slice/crio-85ffb95d986f1c7f0a9d34e7947c1229e2f35587ca261b47a511649706988966 WatchSource:0}: Error finding container 85ffb95d986f1c7f0a9d34e7947c1229e2f35587ca261b47a511649706988966: Status 404 returned error can't find the container with id 85ffb95d986f1c7f0a9d34e7947c1229e2f35587ca261b47a511649706988966 Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.535207 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7" path="/var/lib/kubelet/pods/6f3ffe6b-6f0a-423b-8d5f-8a0da0ec8ee7/volumes" Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.548538 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="747d3fca-be49-419c-b42f-e746edee5eda" path="/var/lib/kubelet/pods/747d3fca-be49-419c-b42f-e746edee5eda/volumes" Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.550271 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b043f173-17ee-445b-a64b-ac750304a5ff" path="/var/lib/kubelet/pods/b043f173-17ee-445b-a64b-ac750304a5ff/volumes" Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.551636 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-dfxx8"] Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.551708 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-66b66545b5-ldnrl"] Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.551771 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-c6e0-account-create-update-stjf9"] Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.557366 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-86b8867947-jkp8w"] Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.562611 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-86b8867947-jkp8w" event={"ID":"98234314-b081-449f-b87c-b562dd7eb209","Type":"ContainerStarted","Data":"0321a0fcc14b38cd2748742a2d2688930bbc893ad137d4023a36700c20498222"} Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.582851 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c6e0-account-create-update-stjf9" event={"ID":"59e30121-a8ea-4d16-8b90-659a6158def9","Type":"ContainerStarted","Data":"7c4e86559f3a902848325217f06e662f0a11c6ad24eb2c655b0c57dc88fb46c9"} Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.587163 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-tkw8b" event={"ID":"19c75c24-13dd-439a-9245-c02f9e6d8ec7","Type":"ContainerStarted","Data":"c5f94d5701d3d95b9ba1b2b2f1be2040d5559111dacc16c1711b8e03ceb4af49"} Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.594977 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-66b66545b5-ldnrl" event={"ID":"db1f5546-0eed-4bf4-bd25-065718c91a46","Type":"ContainerStarted","Data":"dcde4862f2dfde1457c90c189c7a6f2913426d4c6f866e4a8e2a8d2c3192910b"} Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.600513 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dfxx8" 
event={"ID":"7749a4b7-d909-4b9a-ae04-acfcf29f916c","Type":"ContainerStarted","Data":"85ffb95d986f1c7f0a9d34e7947c1229e2f35587ca261b47a511649706988966"} Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.604586 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" event={"ID":"8355c6a7-af56-4b68-bd65-560a99273480","Type":"ContainerStarted","Data":"888f1b55fc6b7fd67c0a4b1f09a42575075e30cb1a53082038275a86d8587c48"} Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.608859 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6c96dc67d-f972b" event={"ID":"97c0facc-4ffb-4f83-86aa-68681d7c3661","Type":"ContainerStarted","Data":"b1b047655a6be1c655a0b1a0802b1e9b08949a769925c8080d76bc0e4d598216"} Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.614728 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-ph4bn" event={"ID":"764294ed-9715-4d81-b7b6-50a4104630fd","Type":"ContainerStarted","Data":"3372f4134f4c595ad705725fd5d4e2786566aba28d01d920e526c39c1b7bad47"} Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.621932 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78d5585959-hndnh" event={"ID":"fc2f618a-c56e-4c06-a365-be3073f2c2ae","Type":"ContainerStarted","Data":"93a55b32eedc48cf6de915dc47866715b62a5e249d7695b478f8a07f676dc280"} Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.695576 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-71e2-account-create-update-xnpjr"] Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.724003 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-417f-account-create-update-d9sds"] Jan 28 17:01:47 crc kubenswrapper[4877]: I0128 17:01:47.981900 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-xngcd"] Jan 28 17:01:48 crc kubenswrapper[4877]: I0128 17:01:48.010778 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 28 17:01:48 crc kubenswrapper[4877]: I0128 17:01:48.142500 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7574dd45d-jbx2q"] Jan 28 17:01:48 crc kubenswrapper[4877]: I0128 17:01:48.323863 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:01:48 crc kubenswrapper[4877]: I0128 17:01:48.513068 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 17:01:48 crc kubenswrapper[4877]: W0128 17:01:48.640954 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3351bc4c_503c_4f4c_8a88_af46d5102724.slice/crio-d26dfd8eb3d170a24e7fdfa9ec1f815a9b2882f5bea5076f245cf721e8ec80ff WatchSource:0}: Error finding container d26dfd8eb3d170a24e7fdfa9ec1f815a9b2882f5bea5076f245cf721e8ec80ff: Status 404 returned error can't find the container with id d26dfd8eb3d170a24e7fdfa9ec1f815a9b2882f5bea5076f245cf721e8ec80ff Jan 28 17:01:48 crc kubenswrapper[4877]: I0128 17:01:48.699220 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-71e2-account-create-update-xnpjr" event={"ID":"b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5","Type":"ContainerStarted","Data":"8b1d4cfacd7a8f7d72e97ad8a7e7a03c2d15dd3037595b1df47cb34cbece7ab7"} Jan 28 17:01:48 crc kubenswrapper[4877]: I0128 17:01:48.706146 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell1-417f-account-create-update-d9sds" event={"ID":"11c0eeb0-1466-4faf-ae33-e74028802131","Type":"ContainerStarted","Data":"0e8415095e34b4985101316442a8ff00882ed4ddedadb8c46da19cb706194b0c"} Jan 28 17:01:48 crc kubenswrapper[4877]: I0128 17:01:48.713120 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"327e7593-5623-475c-ad8f-2456a437a645","Type":"ContainerStarted","Data":"b3567c2e096286582907e59b78edb8b3ccb1cd2dd4554d8ed0d3815e16507d32"} Jan 28 17:01:48 crc kubenswrapper[4877]: I0128 17:01:48.717331 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb","Type":"ContainerStarted","Data":"83af15d5ecdab9a4e039d16c1810b4ad5dbdcc5cce2d39c03a4821af35b9a097"} Jan 28 17:01:48 crc kubenswrapper[4877]: I0128 17:01:48.725900 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7574dd45d-jbx2q" event={"ID":"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f","Type":"ContainerStarted","Data":"8e4a99d03e7ab3fd00f0f089be6439d42607d29eebfa3f1e4a4c01d8f01e8415"} Jan 28 17:01:48 crc kubenswrapper[4877]: I0128 17:01:48.730305 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-ph4bn" event={"ID":"764294ed-9715-4d81-b7b6-50a4104630fd","Type":"ContainerStarted","Data":"2c34628420e9aad56823dc99220a1a6b33d83ce51784eabcd61d4e4965c7cf31"} Jan 28 17:01:48 crc kubenswrapper[4877]: I0128 17:01:48.734703 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-tkw8b" event={"ID":"19c75c24-13dd-439a-9245-c02f9e6d8ec7","Type":"ContainerStarted","Data":"cdc5f8e73929f9f07db943b054d39c565bf0723db74a347c03ddb7c4048b43d3"} Jan 28 17:01:48 crc kubenswrapper[4877]: I0128 17:01:48.738592 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3351bc4c-503c-4f4c-8a88-af46d5102724","Type":"ContainerStarted","Data":"d26dfd8eb3d170a24e7fdfa9ec1f815a9b2882f5bea5076f245cf721e8ec80ff"} Jan 28 17:01:48 crc kubenswrapper[4877]: I0128 17:01:48.741306 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd" event={"ID":"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5","Type":"ContainerStarted","Data":"1ea38fc69c7b481dec51d59ba0201d5e7212d5abdeda5f26a0fea425e607ef08"} Jan 28 17:01:48 crc kubenswrapper[4877]: I0128 17:01:48.782734 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-ph4bn" podStartSLOduration=7.782702824 podStartE2EDuration="7.782702824s" podCreationTimestamp="2026-01-28 17:01:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:48.754767534 +0000 UTC m=+1612.313094422" watchObservedRunningTime="2026-01-28 17:01:48.782702824 +0000 UTC m=+1612.341029712" Jan 28 17:01:48 crc kubenswrapper[4877]: I0128 17:01:48.840710 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-tkw8b" podStartSLOduration=6.84067629 podStartE2EDuration="6.84067629s" podCreationTimestamp="2026-01-28 17:01:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:48.769730206 +0000 UTC m=+1612.328057094" watchObservedRunningTime="2026-01-28 17:01:48.84067629 +0000 UTC m=+1612.399003178" Jan 28 17:01:49 crc kubenswrapper[4877]: 
I0128 17:01:49.332212 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:01:49 crc kubenswrapper[4877]: E0128 17:01:49.332726 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:01:49 crc kubenswrapper[4877]: I0128 17:01:49.466813 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 17:01:49 crc kubenswrapper[4877]: I0128 17:01:49.467326 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="54a9533f-7d16-4422-ac68-5ff6e34ddf39" containerName="glance-log" containerID="cri-o://f42cee324b23a633e0a4278f7eafd8b837b1d3c804e14fb895f80a2336cd45b6" gracePeriod=30 Jan 28 17:01:49 crc kubenswrapper[4877]: I0128 17:01:49.467468 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="54a9533f-7d16-4422-ac68-5ff6e34ddf39" containerName="glance-httpd" containerID="cri-o://fbd861627eeb60a09876529ea4a64e3b217414fc001ef4e87aa4dc127ce7425c" gracePeriod=30 Jan 28 17:01:49 crc kubenswrapper[4877]: I0128 17:01:49.774339 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6c96dc67d-f972b" event={"ID":"97c0facc-4ffb-4f83-86aa-68681d7c3661","Type":"ContainerStarted","Data":"0bd8238c30d3609d31a4549fe38ca17915c0226cede78a1d251f601889371fcd"} Jan 28 17:01:49 crc kubenswrapper[4877]: I0128 17:01:49.776156 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-6c96dc67d-f972b" Jan 28 17:01:49 crc kubenswrapper[4877]: I0128 17:01:49.800573 4877 generic.go:334] "Generic (PLEG): container finished" podID="764294ed-9715-4d81-b7b6-50a4104630fd" containerID="2c34628420e9aad56823dc99220a1a6b33d83ce51784eabcd61d4e4965c7cf31" exitCode=0 Jan 28 17:01:49 crc kubenswrapper[4877]: I0128 17:01:49.800751 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-ph4bn" event={"ID":"764294ed-9715-4d81-b7b6-50a4104630fd","Type":"ContainerDied","Data":"2c34628420e9aad56823dc99220a1a6b33d83ce51784eabcd61d4e4965c7cf31"} Jan 28 17:01:49 crc kubenswrapper[4877]: I0128 17:01:49.841127 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-6c96dc67d-f972b" podStartSLOduration=10.841104249 podStartE2EDuration="10.841104249s" podCreationTimestamp="2026-01-28 17:01:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:49.800452627 +0000 UTC m=+1613.358779535" watchObservedRunningTime="2026-01-28 17:01:49.841104249 +0000 UTC m=+1613.399431147" Jan 28 17:01:49 crc kubenswrapper[4877]: I0128 17:01:49.890644 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c6e0-account-create-update-stjf9" event={"ID":"59e30121-a8ea-4d16-8b90-659a6158def9","Type":"ContainerStarted","Data":"eb81e2c8faaa1999538f948fe9fa6e7d0ba86c30a6c12af6a1d62326f86b11c9"} Jan 28 17:01:49 crc kubenswrapper[4877]: I0128 17:01:49.903978 4877 generic.go:334] 
"Generic (PLEG): container finished" podID="19c75c24-13dd-439a-9245-c02f9e6d8ec7" containerID="cdc5f8e73929f9f07db943b054d39c565bf0723db74a347c03ddb7c4048b43d3" exitCode=0 Jan 28 17:01:49 crc kubenswrapper[4877]: I0128 17:01:49.904049 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-tkw8b" event={"ID":"19c75c24-13dd-439a-9245-c02f9e6d8ec7","Type":"ContainerDied","Data":"cdc5f8e73929f9f07db943b054d39c565bf0723db74a347c03ddb7c4048b43d3"} Jan 28 17:01:49 crc kubenswrapper[4877]: I0128 17:01:49.908433 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-c6e0-account-create-update-stjf9" podStartSLOduration=7.908418716 podStartE2EDuration="7.908418716s" podCreationTimestamp="2026-01-28 17:01:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:49.906867464 +0000 UTC m=+1613.465194352" watchObservedRunningTime="2026-01-28 17:01:49.908418716 +0000 UTC m=+1613.466745594" Jan 28 17:01:49 crc kubenswrapper[4877]: I0128 17:01:49.920727 4877 generic.go:334] "Generic (PLEG): container finished" podID="7749a4b7-d909-4b9a-ae04-acfcf29f916c" containerID="57e59cfe9b20569e5d58de104a96a90907bfa62460cbc2c52b0403ef9d58b8e5" exitCode=0 Jan 28 17:01:49 crc kubenswrapper[4877]: I0128 17:01:49.920829 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dfxx8" event={"ID":"7749a4b7-d909-4b9a-ae04-acfcf29f916c","Type":"ContainerDied","Data":"57e59cfe9b20569e5d58de104a96a90907bfa62460cbc2c52b0403ef9d58b8e5"} Jan 28 17:01:49 crc kubenswrapper[4877]: I0128 17:01:49.962313 4877 generic.go:334] "Generic (PLEG): container finished" podID="54a9533f-7d16-4422-ac68-5ff6e34ddf39" containerID="f42cee324b23a633e0a4278f7eafd8b837b1d3c804e14fb895f80a2336cd45b6" exitCode=143 Jan 28 17:01:49 crc kubenswrapper[4877]: I0128 17:01:49.962372 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"54a9533f-7d16-4422-ac68-5ff6e34ddf39","Type":"ContainerDied","Data":"f42cee324b23a633e0a4278f7eafd8b837b1d3c804e14fb895f80a2336cd45b6"} Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.738742 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6db6c6788c-7mlr2"] Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.744591 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.787052 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.787273 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.811656 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6db6c6788c-7mlr2"] Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.836600 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-internal-tls-certs\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.836677 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-config\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.836722 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-public-tls-certs\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.842286 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bp76x\" (UniqueName: \"kubernetes.io/projected/2c19f87c-83c1-4c2d-97fc-f50a89480878-kube-api-access-bp76x\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.842431 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-ovndb-tls-certs\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.851865 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-combined-ca-bundle\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.851960 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-httpd-config\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.954565 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-internal-tls-certs\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.954614 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-config\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.954687 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-public-tls-certs\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.954759 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bp76x\" (UniqueName: \"kubernetes.io/projected/2c19f87c-83c1-4c2d-97fc-f50a89480878-kube-api-access-bp76x\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.954843 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-ovndb-tls-certs\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.954909 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-combined-ca-bundle\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:50 crc kubenswrapper[4877]: I0128 17:01:50.954959 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-httpd-config\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.016329 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-internal-tls-certs\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.036046 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-public-tls-certs\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.048540 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bp76x\" (UniqueName: \"kubernetes.io/projected/2c19f87c-83c1-4c2d-97fc-f50a89480878-kube-api-access-bp76x\") pod \"neutron-6db6c6788c-7mlr2\" (UID: 
\"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.066511 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-config\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.097051 4877 generic.go:334] "Generic (PLEG): container finished" podID="3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5" containerID="ad05e20ac7e9b3f29c72ff0a8c3b5998fc01159cb585aa14c538653ec622730e" exitCode=0 Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.097818 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd" event={"ID":"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5","Type":"ContainerDied","Data":"ad05e20ac7e9b3f29c72ff0a8c3b5998fc01159cb585aa14c538653ec622730e"} Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.104401 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-httpd-config\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.104986 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-combined-ca-bundle\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.105417 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c19f87c-83c1-4c2d-97fc-f50a89480878-ovndb-tls-certs\") pod \"neutron-6db6c6788c-7mlr2\" (UID: \"2c19f87c-83c1-4c2d-97fc-f50a89480878\") " pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.157100 4877 generic.go:334] "Generic (PLEG): container finished" podID="11c0eeb0-1466-4faf-ae33-e74028802131" containerID="ff0be326d382c6cd9d4ca6cabaab5c8e0cbe07f44a3ad3d4463877f461c4b0dc" exitCode=0 Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.157265 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-417f-account-create-update-d9sds" event={"ID":"11c0eeb0-1466-4faf-ae33-e74028802131","Type":"ContainerDied","Data":"ff0be326d382c6cd9d4ca6cabaab5c8e0cbe07f44a3ad3d4463877f461c4b0dc"} Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.212055 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb","Type":"ContainerStarted","Data":"85e30d408e8e56325c43bfb76d65067668f2789838cbcc8a62569b11423b16df"} Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.215381 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-6d5767f7f9-8pfmz"] Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.251511 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.284504 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7574dd45d-jbx2q" event={"ID":"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f","Type":"ContainerStarted","Data":"cdcf2e3c702d98ab739f7b2b88774bdd56cdcc7c08b392586bf84f18ad9f5fd0"} Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.284548 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7574dd45d-jbx2q" event={"ID":"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f","Type":"ContainerStarted","Data":"bf410acad92d3276f75950cf794ecd8c3b21031823ac77c2d02323917bc3e376"} Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.286060 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7574dd45d-jbx2q" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.288158 4877 generic.go:334] "Generic (PLEG): container finished" podID="59e30121-a8ea-4d16-8b90-659a6158def9" containerID="eb81e2c8faaa1999538f948fe9fa6e7d0ba86c30a6c12af6a1d62326f86b11c9" exitCode=0 Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.288875 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-6d5767f7f9-8pfmz"] Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.288995 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c6e0-account-create-update-stjf9" event={"ID":"59e30121-a8ea-4d16-8b90-659a6158def9","Type":"ContainerDied","Data":"eb81e2c8faaa1999538f948fe9fa6e7d0ba86c30a6c12af6a1d62326f86b11c9"} Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.325074 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-55d5d56894-j6z8h"] Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.326754 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbg7z\" (UniqueName: \"kubernetes.io/projected/f8dea33a-3c8f-43eb-af20-df530ec7a89d-kube-api-access-pbg7z\") pod \"heat-engine-6d5767f7f9-8pfmz\" (UID: \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\") " pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.326820 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-config-data-custom\") pod \"heat-engine-6d5767f7f9-8pfmz\" (UID: \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\") " pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.326854 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-config-data\") pod \"heat-engine-6d5767f7f9-8pfmz\" (UID: \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\") " pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.326909 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-combined-ca-bundle\") pod \"heat-engine-6d5767f7f9-8pfmz\" (UID: \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\") " pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.329899 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.353980 4877 generic.go:334] "Generic (PLEG): container finished" podID="b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5" containerID="977237e45e52b07423b5f28e12b0b8956c1da080a861b7df00800244b5c454fb" exitCode=0 Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.403926 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-86b8867947-jkp8w" event={"ID":"98234314-b081-449f-b87c-b562dd7eb209","Type":"ContainerStarted","Data":"4a7cab9b51ef8f7d745eeb046aa92c9d7c7bc9feaf02142b7328a1ac053e7484"} Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.404383 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-71e2-account-create-update-xnpjr" event={"ID":"b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5","Type":"ContainerDied","Data":"977237e45e52b07423b5f28e12b0b8956c1da080a861b7df00800244b5c454fb"} Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.405388 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-b5644bdb6-c7hcq"] Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.415045 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.418820 4877 generic.go:334] "Generic (PLEG): container finished" podID="fc2f618a-c56e-4c06-a365-be3073f2c2ae" containerID="f1e2fb3d963a5c5f30e269a9463e7deea2cfead9bf664200ccf82d653420eada" exitCode=0 Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.418925 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78d5585959-hndnh" event={"ID":"fc2f618a-c56e-4c06-a365-be3073f2c2ae","Type":"ContainerDied","Data":"f1e2fb3d963a5c5f30e269a9463e7deea2cfead9bf664200ccf82d653420eada"} Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.429585 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbg7z\" (UniqueName: \"kubernetes.io/projected/f8dea33a-3c8f-43eb-af20-df530ec7a89d-kube-api-access-pbg7z\") pod \"heat-engine-6d5767f7f9-8pfmz\" (UID: \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\") " pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.429707 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-config-data\") pod \"heat-api-55d5d56894-j6z8h\" (UID: \"0d4944b1-cca6-4ace-9334-12dd2b981be8\") " pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.429787 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-config-data-custom\") pod \"heat-engine-6d5767f7f9-8pfmz\" (UID: \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\") " pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.429835 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-config-data\") pod \"heat-engine-6d5767f7f9-8pfmz\" (UID: \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\") " pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.429887 4877 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-combined-ca-bundle\") pod \"heat-api-55d5d56894-j6z8h\" (UID: \"0d4944b1-cca6-4ace-9334-12dd2b981be8\") " pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.429962 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fdzz\" (UniqueName: \"kubernetes.io/projected/0d4944b1-cca6-4ace-9334-12dd2b981be8-kube-api-access-2fdzz\") pod \"heat-api-55d5d56894-j6z8h\" (UID: \"0d4944b1-cca6-4ace-9334-12dd2b981be8\") " pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.429989 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-combined-ca-bundle\") pod \"heat-engine-6d5767f7f9-8pfmz\" (UID: \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\") " pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.430150 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-config-data-custom\") pod \"heat-api-55d5d56894-j6z8h\" (UID: \"0d4944b1-cca6-4ace-9334-12dd2b981be8\") " pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.436241 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3351bc4c-503c-4f4c-8a88-af46d5102724","Type":"ContainerStarted","Data":"6f620002e85e3026c000605d85116fbacc2f0db98734a0eea520b69c70ee199c"} Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.443749 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-b5644bdb6-c7hcq"] Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.456617 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbg7z\" (UniqueName: \"kubernetes.io/projected/f8dea33a-3c8f-43eb-af20-df530ec7a89d-kube-api-access-pbg7z\") pod \"heat-engine-6d5767f7f9-8pfmz\" (UID: \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\") " pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.469845 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-55d5d56894-j6z8h"] Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.477718 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-config-data\") pod \"heat-engine-6d5767f7f9-8pfmz\" (UID: \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\") " pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.525313 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-combined-ca-bundle\") pod \"heat-engine-6d5767f7f9-8pfmz\" (UID: \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\") " pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.527615 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-config-data-custom\") pod \"heat-engine-6d5767f7f9-8pfmz\" (UID: \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\") " pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.558167 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7574dd45d-jbx2q" podStartSLOduration=6.558137086 podStartE2EDuration="6.558137086s" podCreationTimestamp="2026-01-28 17:01:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:51.311463353 +0000 UTC m=+1614.869790241" watchObservedRunningTime="2026-01-28 17:01:51.558137086 +0000 UTC m=+1615.116463964" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.570347 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-combined-ca-bundle\") pod \"heat-api-55d5d56894-j6z8h\" (UID: \"0d4944b1-cca6-4ace-9334-12dd2b981be8\") " pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.570559 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fdzz\" (UniqueName: \"kubernetes.io/projected/0d4944b1-cca6-4ace-9334-12dd2b981be8-kube-api-access-2fdzz\") pod \"heat-api-55d5d56894-j6z8h\" (UID: \"0d4944b1-cca6-4ace-9334-12dd2b981be8\") " pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.570758 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-config-data-custom\") pod \"heat-api-55d5d56894-j6z8h\" (UID: \"0d4944b1-cca6-4ace-9334-12dd2b981be8\") " pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.570995 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2kcl\" (UniqueName: \"kubernetes.io/projected/4de40718-80f2-48ae-8929-32a2c0e96707-kube-api-access-v2kcl\") pod \"heat-cfnapi-b5644bdb6-c7hcq\" (UID: \"4de40718-80f2-48ae-8929-32a2c0e96707\") " pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.571064 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-config-data-custom\") pod \"heat-cfnapi-b5644bdb6-c7hcq\" (UID: \"4de40718-80f2-48ae-8929-32a2c0e96707\") " pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.571096 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-combined-ca-bundle\") pod \"heat-cfnapi-b5644bdb6-c7hcq\" (UID: \"4de40718-80f2-48ae-8929-32a2c0e96707\") " pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.571195 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-config-data\") pod \"heat-api-55d5d56894-j6z8h\" (UID: \"0d4944b1-cca6-4ace-9334-12dd2b981be8\") " 
pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.571314 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-config-data\") pod \"heat-cfnapi-b5644bdb6-c7hcq\" (UID: \"4de40718-80f2-48ae-8929-32a2c0e96707\") " pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.617654 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.718427 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2kcl\" (UniqueName: \"kubernetes.io/projected/4de40718-80f2-48ae-8929-32a2c0e96707-kube-api-access-v2kcl\") pod \"heat-cfnapi-b5644bdb6-c7hcq\" (UID: \"4de40718-80f2-48ae-8929-32a2c0e96707\") " pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.733432 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-config-data-custom\") pod \"heat-cfnapi-b5644bdb6-c7hcq\" (UID: \"4de40718-80f2-48ae-8929-32a2c0e96707\") " pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.733496 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-combined-ca-bundle\") pod \"heat-cfnapi-b5644bdb6-c7hcq\" (UID: \"4de40718-80f2-48ae-8929-32a2c0e96707\") " pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.733804 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-config-data\") pod \"heat-cfnapi-b5644bdb6-c7hcq\" (UID: \"4de40718-80f2-48ae-8929-32a2c0e96707\") " pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.747090 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.771600 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-config-data-custom\") pod \"heat-cfnapi-b5644bdb6-c7hcq\" (UID: \"4de40718-80f2-48ae-8929-32a2c0e96707\") " pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.772361 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2kcl\" (UniqueName: \"kubernetes.io/projected/4de40718-80f2-48ae-8929-32a2c0e96707-kube-api-access-v2kcl\") pod \"heat-cfnapi-b5644bdb6-c7hcq\" (UID: \"4de40718-80f2-48ae-8929-32a2c0e96707\") " pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.810903 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-config-data-custom\") pod \"heat-api-55d5d56894-j6z8h\" (UID: \"0d4944b1-cca6-4ace-9334-12dd2b981be8\") " pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.823756 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-config-data\") pod \"heat-cfnapi-b5644bdb6-c7hcq\" (UID: \"4de40718-80f2-48ae-8929-32a2c0e96707\") " pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.844090 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-combined-ca-bundle\") pod \"heat-cfnapi-b5644bdb6-c7hcq\" (UID: \"4de40718-80f2-48ae-8929-32a2c0e96707\") " pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.882200 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-config-data\") pod \"heat-api-55d5d56894-j6z8h\" (UID: \"0d4944b1-cca6-4ace-9334-12dd2b981be8\") " pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.884000 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-combined-ca-bundle\") pod \"heat-api-55d5d56894-j6z8h\" (UID: \"0d4944b1-cca6-4ace-9334-12dd2b981be8\") " pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:01:51 crc kubenswrapper[4877]: I0128 17:01:51.892991 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fdzz\" (UniqueName: \"kubernetes.io/projected/0d4944b1-cca6-4ace-9334-12dd2b981be8-kube-api-access-2fdzz\") pod \"heat-api-55d5d56894-j6z8h\" (UID: \"0d4944b1-cca6-4ace-9334-12dd2b981be8\") " pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:01:52 crc kubenswrapper[4877]: I0128 17:01:52.090032 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:01:52 crc kubenswrapper[4877]: I0128 17:01:52.111881 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:01:52 crc kubenswrapper[4877]: I0128 17:01:52.676409 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"327e7593-5623-475c-ad8f-2456a437a645","Type":"ContainerStarted","Data":"f772389235174d1da19b8526fef28b2c4ba0985cd362c88612ef554252adda2c"} Jan 28 17:01:52 crc kubenswrapper[4877]: I0128 17:01:52.700828 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb","Type":"ContainerStarted","Data":"e797f444d22b70e52dd0bab83d1d5e39fbf174e7de12276164ab230b18f208ae"} Jan 28 17:01:52 crc kubenswrapper[4877]: I0128 17:01:52.703363 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-86b8867947-jkp8w" event={"ID":"98234314-b081-449f-b87c-b562dd7eb209","Type":"ContainerStarted","Data":"75f530c808af1fe934a9abef92254fe9bfe23ec57bb31e8714abc27bc3814565"} Jan 28 17:01:52 crc kubenswrapper[4877]: I0128 17:01:52.704869 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-86b8867947-jkp8w" Jan 28 17:01:52 crc kubenswrapper[4877]: I0128 17:01:52.704897 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-86b8867947-jkp8w" Jan 28 17:01:52 crc kubenswrapper[4877]: I0128 17:01:52.723415 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd" event={"ID":"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5","Type":"ContainerStarted","Data":"bfe39903a2895869c905d25d49dc367f979b5189fd0b55c77792cb7296f78e17"} Jan 28 17:01:52 crc kubenswrapper[4877]: I0128 17:01:52.723742 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd" Jan 28 17:01:52 crc kubenswrapper[4877]: I0128 17:01:52.781974 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-86b8867947-jkp8w" podStartSLOduration=17.781839239 podStartE2EDuration="17.781839239s" podCreationTimestamp="2026-01-28 17:01:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:52.726128913 +0000 UTC m=+1616.284455801" watchObservedRunningTime="2026-01-28 17:01:52.781839239 +0000 UTC m=+1616.340166127" Jan 28 17:01:52 crc kubenswrapper[4877]: I0128 17:01:52.827415 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd" podStartSLOduration=7.827390112 podStartE2EDuration="7.827390112s" podCreationTimestamp="2026-01-28 17:01:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:52.760806144 +0000 UTC m=+1616.319133022" watchObservedRunningTime="2026-01-28 17:01:52.827390112 +0000 UTC m=+1616.385717000" Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.031052 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="1e203aba-b679-45b3-9987-8a63bdb556db" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.203:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.051733 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7b4cc7844-8vpdw" Jan 28 17:01:53 crc 
Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.556497 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6db6c6788c-7mlr2"]
Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.679411 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hhmdh"]
Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.685847 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hhmdh"
Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.733911 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hhmdh"]
Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.766378 4877 generic.go:334] "Generic (PLEG): container finished" podID="54a9533f-7d16-4422-ac68-5ff6e34ddf39" containerID="fbd861627eeb60a09876529ea4a64e3b217414fc001ef4e87aa4dc127ce7425c" exitCode=0
Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.766460 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"54a9533f-7d16-4422-ac68-5ff6e34ddf39","Type":"ContainerDied","Data":"fbd861627eeb60a09876529ea4a64e3b217414fc001ef4e87aa4dc127ce7425c"}
Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.774652 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"327e7593-5623-475c-ad8f-2456a437a645","Type":"ContainerStarted","Data":"e3b7d92e2ca19c70c1b933f395b780b583ccb27fc9de9810dfea44fd12b90ffa"}
Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.790759 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3351bc4c-503c-4f4c-8a88-af46d5102724","Type":"ContainerStarted","Data":"1243ce1f01824187f86183f1b9f1534e1d8a8d96e08f8bcd33a1f6911349f9dc"}
Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.808876 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=8.808858732000001 podStartE2EDuration="8.808858732s" podCreationTimestamp="2026-01-28 17:01:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:53.80876878 +0000 UTC m=+1617.367095668" watchObservedRunningTime="2026-01-28 17:01:53.808858732 +0000 UTC m=+1617.367185620"
Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.817973 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pfmb\" (UniqueName: \"kubernetes.io/projected/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-kube-api-access-8pfmb\") pod \"certified-operators-hhmdh\" (UID: \"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728\") " pod="openshift-marketplace/certified-operators-hhmdh"
Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.818134 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-utilities\") pod \"certified-operators-hhmdh\" (UID: \"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728\") " pod="openshift-marketplace/certified-operators-hhmdh"
Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.818161 4877 reconciler_common.go:245]
"operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-catalog-content\") pod \"certified-operators-hhmdh\" (UID: \"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728\") " pod="openshift-marketplace/certified-operators-hhmdh" Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.840776 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=7.840756969 podStartE2EDuration="7.840756969s" podCreationTimestamp="2026-01-28 17:01:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:01:53.834135611 +0000 UTC m=+1617.392462489" watchObservedRunningTime="2026-01-28 17:01:53.840756969 +0000 UTC m=+1617.399083857" Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.921564 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pfmb\" (UniqueName: \"kubernetes.io/projected/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-kube-api-access-8pfmb\") pod \"certified-operators-hhmdh\" (UID: \"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728\") " pod="openshift-marketplace/certified-operators-hhmdh" Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.921989 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-utilities\") pod \"certified-operators-hhmdh\" (UID: \"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728\") " pod="openshift-marketplace/certified-operators-hhmdh" Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.922007 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-catalog-content\") pod \"certified-operators-hhmdh\" (UID: \"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728\") " pod="openshift-marketplace/certified-operators-hhmdh" Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.925466 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-utilities\") pod \"certified-operators-hhmdh\" (UID: \"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728\") " pod="openshift-marketplace/certified-operators-hhmdh" Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.925852 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-catalog-content\") pod \"certified-operators-hhmdh\" (UID: \"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728\") " pod="openshift-marketplace/certified-operators-hhmdh" Jan 28 17:01:53 crc kubenswrapper[4877]: I0128 17:01:53.958191 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pfmb\" (UniqueName: \"kubernetes.io/projected/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-kube-api-access-8pfmb\") pod \"certified-operators-hhmdh\" (UID: \"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728\") " pod="openshift-marketplace/certified-operators-hhmdh" Jan 28 17:01:54 crc kubenswrapper[4877]: I0128 17:01:54.029703 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hhmdh" Jan 28 17:01:54 crc kubenswrapper[4877]: I0128 17:01:54.815005 4877 generic.go:334] "Generic (PLEG): container finished" podID="1e203aba-b679-45b3-9987-8a63bdb556db" containerID="45d34b4d7c70dddd2467b9e38385da961dad5b64f625d55daf6fd4518f74ce84" exitCode=137 Jan 28 17:01:54 crc kubenswrapper[4877]: I0128 17:01:54.815088 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1e203aba-b679-45b3-9987-8a63bdb556db","Type":"ContainerDied","Data":"45d34b4d7c70dddd2467b9e38385da961dad5b64f625d55daf6fd4518f74ce84"} Jan 28 17:01:54 crc kubenswrapper[4877]: I0128 17:01:54.902291 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-86b8867947-jkp8w" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.286695 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-66b66545b5-ldnrl"] Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.310885 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7486d7b6df-vf9q9"] Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.327918 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-66df9b7b7d-nkmrc"] Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.331059 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.335025 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.335227 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.377845 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-66df9b7b7d-nkmrc"] Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.386131 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-759d866587-ps7h5"] Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.400171 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.413039 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-759d866587-ps7h5"] Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.416399 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.417366 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.473172 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66ghp\" (UniqueName: \"kubernetes.io/projected/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-kube-api-access-66ghp\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.474103 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-combined-ca-bundle\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.474275 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-config-data-custom\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.474515 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-internal-tls-certs\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.474827 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-config-data\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.475197 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-internal-tls-certs\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.475341 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mchb6\" (UniqueName: \"kubernetes.io/projected/c6e0515b-5a47-473c-859a-8cbc2f02d959-kube-api-access-mchb6\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.475465 4877 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-config-data-custom\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.475574 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-public-tls-certs\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.475682 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-public-tls-certs\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.475840 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-config-data\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.475991 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-combined-ca-bundle\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.578430 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66ghp\" (UniqueName: \"kubernetes.io/projected/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-kube-api-access-66ghp\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.578608 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-combined-ca-bundle\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.578643 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-config-data-custom\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.578685 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-internal-tls-certs\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: 
I0128 17:01:55.578762 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-config-data\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.578855 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-internal-tls-certs\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.578878 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mchb6\" (UniqueName: \"kubernetes.io/projected/c6e0515b-5a47-473c-859a-8cbc2f02d959-kube-api-access-mchb6\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.578909 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-config-data-custom\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.578933 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-public-tls-certs\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.578958 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-public-tls-certs\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.578989 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-config-data\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.579022 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-combined-ca-bundle\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.591931 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-combined-ca-bundle\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.592464 4877 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-public-tls-certs\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.593680 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-internal-tls-certs\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.594263 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-combined-ca-bundle\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.595321 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-config-data\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.595404 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-config-data-custom\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.598509 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-internal-tls-certs\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.602044 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-config-data-custom\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.606755 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-public-tls-certs\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.608064 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-config-data\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.609332 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mchb6\" (UniqueName: 
\"kubernetes.io/projected/c6e0515b-5a47-473c-859a-8cbc2f02d959-kube-api-access-mchb6\") pod \"heat-api-66df9b7b7d-nkmrc\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.632649 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66ghp\" (UniqueName: \"kubernetes.io/projected/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-kube-api-access-66ghp\") pod \"heat-cfnapi-759d866587-ps7h5\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.632938 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.701505 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.760054 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.853598 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dfxx8" event={"ID":"7749a4b7-d909-4b9a-ae04-acfcf29f916c","Type":"ContainerDied","Data":"85ffb95d986f1c7f0a9d34e7947c1229e2f35587ca261b47a511649706988966"} Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.853640 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="85ffb95d986f1c7f0a9d34e7947c1229e2f35587ca261b47a511649706988966" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.854827 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6db6c6788c-7mlr2" event={"ID":"2c19f87c-83c1-4c2d-97fc-f50a89480878","Type":"ContainerStarted","Data":"1c1eb9faa56831c4f41bd5426919a8aeaf5e9f3c630749a2204b2907e7ab0403"} Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.873088 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-71e2-account-create-update-xnpjr" event={"ID":"b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5","Type":"ContainerDied","Data":"8b1d4cfacd7a8f7d72e97ad8a7e7a03c2d15dd3037595b1df47cb34cbece7ab7"} Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.873142 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8b1d4cfacd7a8f7d72e97ad8a7e7a03c2d15dd3037595b1df47cb34cbece7ab7" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.880506 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-417f-account-create-update-d9sds" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.881085 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-417f-account-create-update-d9sds" event={"ID":"11c0eeb0-1466-4faf-ae33-e74028802131","Type":"ContainerDied","Data":"0e8415095e34b4985101316442a8ff00882ed4ddedadb8c46da19cb706194b0c"} Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.881117 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e8415095e34b4985101316442a8ff00882ed4ddedadb8c46da19cb706194b0c" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.892228 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-ph4bn" event={"ID":"764294ed-9715-4d81-b7b6-50a4104630fd","Type":"ContainerDied","Data":"3372f4134f4c595ad705725fd5d4e2786566aba28d01d920e526c39c1b7bad47"} Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.892267 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3372f4134f4c595ad705725fd5d4e2786566aba28d01d920e526c39c1b7bad47" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.893747 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c6e0-account-create-update-stjf9" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.903969 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c6e0-account-create-update-stjf9" event={"ID":"59e30121-a8ea-4d16-8b90-659a6158def9","Type":"ContainerDied","Data":"7c4e86559f3a902848325217f06e662f0a11c6ad24eb2c655b0c57dc88fb46c9"} Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.904021 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c4e86559f3a902848325217f06e662f0a11c6ad24eb2c655b0c57dc88fb46c9" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.906965 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-71e2-account-create-update-xnpjr" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.907435 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-tkw8b" event={"ID":"19c75c24-13dd-439a-9245-c02f9e6d8ec7","Type":"ContainerDied","Data":"c5f94d5701d3d95b9ba1b2b2f1be2040d5559111dacc16c1711b8e03ceb4af49"} Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.907459 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c5f94d5701d3d95b9ba1b2b2f1be2040d5559111dacc16c1711b8e03ceb4af49" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.927144 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78d5585959-hndnh" event={"ID":"fc2f618a-c56e-4c06-a365-be3073f2c2ae","Type":"ContainerDied","Data":"93a55b32eedc48cf6de915dc47866715b62a5e249d7695b478f8a07f676dc280"} Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.927193 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="93a55b32eedc48cf6de915dc47866715b62a5e249d7695b478f8a07f676dc280" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.956737 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dfxx8" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.957847 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-ph4bn" Jan 28 17:01:55 crc kubenswrapper[4877]: I0128 17:01:55.966927 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-tkw8b" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.012839 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78d5585959-hndnh" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.036112 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7749a4b7-d909-4b9a-ae04-acfcf29f916c-operator-scripts\") pod \"7749a4b7-d909-4b9a-ae04-acfcf29f916c\" (UID: \"7749a4b7-d909-4b9a-ae04-acfcf29f916c\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.036227 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5gjfn\" (UniqueName: \"kubernetes.io/projected/11c0eeb0-1466-4faf-ae33-e74028802131-kube-api-access-5gjfn\") pod \"11c0eeb0-1466-4faf-ae33-e74028802131\" (UID: \"11c0eeb0-1466-4faf-ae33-e74028802131\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.036261 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59e30121-a8ea-4d16-8b90-659a6158def9-operator-scripts\") pod \"59e30121-a8ea-4d16-8b90-659a6158def9\" (UID: \"59e30121-a8ea-4d16-8b90-659a6158def9\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.036303 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45lv7\" (UniqueName: \"kubernetes.io/projected/7749a4b7-d909-4b9a-ae04-acfcf29f916c-kube-api-access-45lv7\") pod \"7749a4b7-d909-4b9a-ae04-acfcf29f916c\" (UID: \"7749a4b7-d909-4b9a-ae04-acfcf29f916c\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.036325 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hmbsp\" (UniqueName: \"kubernetes.io/projected/19c75c24-13dd-439a-9245-c02f9e6d8ec7-kube-api-access-hmbsp\") pod \"19c75c24-13dd-439a-9245-c02f9e6d8ec7\" (UID: \"19c75c24-13dd-439a-9245-c02f9e6d8ec7\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.036351 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqchl\" (UniqueName: \"kubernetes.io/projected/59e30121-a8ea-4d16-8b90-659a6158def9-kube-api-access-qqchl\") pod \"59e30121-a8ea-4d16-8b90-659a6158def9\" (UID: \"59e30121-a8ea-4d16-8b90-659a6158def9\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.036426 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5-operator-scripts\") pod \"b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5\" (UID: \"b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.040795 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/764294ed-9715-4d81-b7b6-50a4104630fd-operator-scripts\") pod \"764294ed-9715-4d81-b7b6-50a4104630fd\" (UID: \"764294ed-9715-4d81-b7b6-50a4104630fd\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.040880 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-758j6\" (UniqueName: 
\"kubernetes.io/projected/b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5-kube-api-access-758j6\") pod \"b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5\" (UID: \"b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.040904 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11c0eeb0-1466-4faf-ae33-e74028802131-operator-scripts\") pod \"11c0eeb0-1466-4faf-ae33-e74028802131\" (UID: \"11c0eeb0-1466-4faf-ae33-e74028802131\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.040955 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19c75c24-13dd-439a-9245-c02f9e6d8ec7-operator-scripts\") pod \"19c75c24-13dd-439a-9245-c02f9e6d8ec7\" (UID: \"19c75c24-13dd-439a-9245-c02f9e6d8ec7\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.041063 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvpck\" (UniqueName: \"kubernetes.io/projected/764294ed-9715-4d81-b7b6-50a4104630fd-kube-api-access-gvpck\") pod \"764294ed-9715-4d81-b7b6-50a4104630fd\" (UID: \"764294ed-9715-4d81-b7b6-50a4104630fd\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.041896 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7749a4b7-d909-4b9a-ae04-acfcf29f916c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7749a4b7-d909-4b9a-ae04-acfcf29f916c" (UID: "7749a4b7-d909-4b9a-ae04-acfcf29f916c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.042225 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7749a4b7-d909-4b9a-ae04-acfcf29f916c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.042700 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11c0eeb0-1466-4faf-ae33-e74028802131-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "11c0eeb0-1466-4faf-ae33-e74028802131" (UID: "11c0eeb0-1466-4faf-ae33-e74028802131"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.043500 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59e30121-a8ea-4d16-8b90-659a6158def9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "59e30121-a8ea-4d16-8b90-659a6158def9" (UID: "59e30121-a8ea-4d16-8b90-659a6158def9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.044002 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5" (UID: "b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.047869 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19c75c24-13dd-439a-9245-c02f9e6d8ec7-kube-api-access-hmbsp" (OuterVolumeSpecName: "kube-api-access-hmbsp") pod "19c75c24-13dd-439a-9245-c02f9e6d8ec7" (UID: "19c75c24-13dd-439a-9245-c02f9e6d8ec7"). InnerVolumeSpecName "kube-api-access-hmbsp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.052842 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/764294ed-9715-4d81-b7b6-50a4104630fd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "764294ed-9715-4d81-b7b6-50a4104630fd" (UID: "764294ed-9715-4d81-b7b6-50a4104630fd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.054829 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11c0eeb0-1466-4faf-ae33-e74028802131-kube-api-access-5gjfn" (OuterVolumeSpecName: "kube-api-access-5gjfn") pod "11c0eeb0-1466-4faf-ae33-e74028802131" (UID: "11c0eeb0-1466-4faf-ae33-e74028802131"). InnerVolumeSpecName "kube-api-access-5gjfn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.054991 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7749a4b7-d909-4b9a-ae04-acfcf29f916c-kube-api-access-45lv7" (OuterVolumeSpecName: "kube-api-access-45lv7") pod "7749a4b7-d909-4b9a-ae04-acfcf29f916c" (UID: "7749a4b7-d909-4b9a-ae04-acfcf29f916c"). InnerVolumeSpecName "kube-api-access-45lv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.059109 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19c75c24-13dd-439a-9245-c02f9e6d8ec7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "19c75c24-13dd-439a-9245-c02f9e6d8ec7" (UID: "19c75c24-13dd-439a-9245-c02f9e6d8ec7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.077199 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5-kube-api-access-758j6" (OuterVolumeSpecName: "kube-api-access-758j6") pod "b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5" (UID: "b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5"). InnerVolumeSpecName "kube-api-access-758j6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.077238 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/764294ed-9715-4d81-b7b6-50a4104630fd-kube-api-access-gvpck" (OuterVolumeSpecName: "kube-api-access-gvpck") pod "764294ed-9715-4d81-b7b6-50a4104630fd" (UID: "764294ed-9715-4d81-b7b6-50a4104630fd"). InnerVolumeSpecName "kube-api-access-gvpck". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.077302 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59e30121-a8ea-4d16-8b90-659a6158def9-kube-api-access-qqchl" (OuterVolumeSpecName: "kube-api-access-qqchl") pod "59e30121-a8ea-4d16-8b90-659a6158def9" (UID: "59e30121-a8ea-4d16-8b90-659a6158def9"). InnerVolumeSpecName "kube-api-access-qqchl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.161845 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-config\") pod \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.161912 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4j6s\" (UniqueName: \"kubernetes.io/projected/fc2f618a-c56e-4c06-a365-be3073f2c2ae-kube-api-access-g4j6s\") pod \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.161970 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-ovsdbserver-sb\") pod \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.162083 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-dns-svc\") pod \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.162154 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-dns-swift-storage-0\") pod \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.162329 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-ovsdbserver-nb\") pod \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\" (UID: \"fc2f618a-c56e-4c06-a365-be3073f2c2ae\") " Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.163249 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvpck\" (UniqueName: \"kubernetes.io/projected/764294ed-9715-4d81-b7b6-50a4104630fd-kube-api-access-gvpck\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.163267 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5gjfn\" (UniqueName: \"kubernetes.io/projected/11c0eeb0-1466-4faf-ae33-e74028802131-kube-api-access-5gjfn\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.163280 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59e30121-a8ea-4d16-8b90-659a6158def9-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.163292 4877 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45lv7\" (UniqueName: \"kubernetes.io/projected/7749a4b7-d909-4b9a-ae04-acfcf29f916c-kube-api-access-45lv7\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.163303 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hmbsp\" (UniqueName: \"kubernetes.io/projected/19c75c24-13dd-439a-9245-c02f9e6d8ec7-kube-api-access-hmbsp\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.163314 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qqchl\" (UniqueName: \"kubernetes.io/projected/59e30121-a8ea-4d16-8b90-659a6158def9-kube-api-access-qqchl\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.163325 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.163335 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/764294ed-9715-4d81-b7b6-50a4104630fd-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.163349 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-758j6\" (UniqueName: \"kubernetes.io/projected/b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5-kube-api-access-758j6\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.163359 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11c0eeb0-1466-4faf-ae33-e74028802131-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.163371 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19c75c24-13dd-439a-9245-c02f9e6d8ec7-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.191016 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc2f618a-c56e-4c06-a365-be3073f2c2ae-kube-api-access-g4j6s" (OuterVolumeSpecName: "kube-api-access-g4j6s") pod "fc2f618a-c56e-4c06-a365-be3073f2c2ae" (UID: "fc2f618a-c56e-4c06-a365-be3073f2c2ae"). InnerVolumeSpecName "kube-api-access-g4j6s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.266161 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "fc2f618a-c56e-4c06-a365-be3073f2c2ae" (UID: "fc2f618a-c56e-4c06-a365-be3073f2c2ae"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.272909 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4j6s\" (UniqueName: \"kubernetes.io/projected/fc2f618a-c56e-4c06-a365-be3073f2c2ae-kube-api-access-g4j6s\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.272944 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.313167 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.317738 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="327e7593-5623-475c-ad8f-2456a437a645" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.219:8080/\": dial tcp 10.217.0.219:8080: connect: connection refused" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.321993 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "fc2f618a-c56e-4c06-a365-be3073f2c2ae" (UID: "fc2f618a-c56e-4c06-a365-be3073f2c2ae"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.322933 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "fc2f618a-c56e-4c06-a365-be3073f2c2ae" (UID: "fc2f618a-c56e-4c06-a365-be3073f2c2ae"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.333801 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "fc2f618a-c56e-4c06-a365-be3073f2c2ae" (UID: "fc2f618a-c56e-4c06-a365-be3073f2c2ae"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.369601 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-config" (OuterVolumeSpecName: "config") pod "fc2f618a-c56e-4c06-a365-be3073f2c2ae" (UID: "fc2f618a-c56e-4c06-a365-be3073f2c2ae"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.389778 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.394259 4877 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.394304 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.394342 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc2f618a-c56e-4c06-a365-be3073f2c2ae-config\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:56 crc kubenswrapper[4877]: I0128 17:01:56.971184 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c6e0-account-create-update-stjf9" Jan 28 17:01:57 crc kubenswrapper[4877]: I0128 17:01:56.976667 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78d5585959-hndnh" Jan 28 17:01:57 crc kubenswrapper[4877]: I0128 17:01:56.978826 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-tkw8b" Jan 28 17:01:57 crc kubenswrapper[4877]: I0128 17:01:56.981376 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-417f-account-create-update-d9sds" Jan 28 17:01:57 crc kubenswrapper[4877]: I0128 17:01:56.981419 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-ph4bn" Jan 28 17:01:57 crc kubenswrapper[4877]: I0128 17:01:56.981381 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-71e2-account-create-update-xnpjr" Jan 28 17:01:57 crc kubenswrapper[4877]: I0128 17:01:56.981687 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-dfxx8" Jan 28 17:01:57 crc kubenswrapper[4877]: I0128 17:01:57.089891 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-6d5767f7f9-8pfmz"] Jan 28 17:01:57 crc kubenswrapper[4877]: I0128 17:01:57.962674 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:57 crc kubenswrapper[4877]: I0128 17:01:57.963422 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:57 crc kubenswrapper[4877]: I0128 17:01:57.963670 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hhmdh"] Jan 28 17:01:57 crc kubenswrapper[4877]: I0128 17:01:57.963690 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-b5644bdb6-c7hcq"] Jan 28 17:01:57 crc kubenswrapper[4877]: I0128 17:01:57.963701 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-55d5d56894-j6z8h"] Jan 28 17:01:57 crc kubenswrapper[4877]: I0128 17:01:57.963714 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-759d866587-ps7h5"] Jan 28 17:01:58 crc kubenswrapper[4877]: I0128 17:01:58.041976 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6d5767f7f9-8pfmz" event={"ID":"f8dea33a-3c8f-43eb-af20-df530ec7a89d","Type":"ContainerStarted","Data":"cc3891ff96e99de23f3cdb47f29f5ca6b03a49e9e3450d7df4b9de5aae6ba84a"} Jan 28 17:01:58 crc kubenswrapper[4877]: I0128 17:01:58.045495 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"54a9533f-7d16-4422-ac68-5ff6e34ddf39","Type":"ContainerDied","Data":"526207bef1f52df9f9a84a9f9fea1846c224f44e534a726c492f31933d830d3d"} Jan 28 17:01:58 crc kubenswrapper[4877]: I0128 17:01:58.045553 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="526207bef1f52df9f9a84a9f9fea1846c224f44e534a726c492f31933d830d3d" Jan 28 17:01:58 crc kubenswrapper[4877]: I0128 17:01:58.049657 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1e203aba-b679-45b3-9987-8a63bdb556db","Type":"ContainerDied","Data":"320fb0fa31dafc6b492eefc6c9de42861f0448a04929bf5d23fd630e4c7429be"} Jan 28 17:01:58 crc kubenswrapper[4877]: I0128 17:01:58.051254 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="320fb0fa31dafc6b492eefc6c9de42861f0448a04929bf5d23fd630e4c7429be" Jan 28 17:01:58 crc kubenswrapper[4877]: I0128 17:01:58.360870 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:58 crc kubenswrapper[4877]: I0128 17:01:58.383338 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-66df9b7b7d-nkmrc"] Jan 28 17:01:58 crc kubenswrapper[4877]: I0128 17:01:58.383709 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:58 crc kubenswrapper[4877]: I0128 17:01:58.846993 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:58.994564 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-combined-ca-bundle\") pod \"1e203aba-b679-45b3-9987-8a63bdb556db\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.004806 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-scripts\") pod \"1e203aba-b679-45b3-9987-8a63bdb556db\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.005060 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vp7d8\" (UniqueName: \"kubernetes.io/projected/1e203aba-b679-45b3-9987-8a63bdb556db-kube-api-access-vp7d8\") pod \"1e203aba-b679-45b3-9987-8a63bdb556db\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.005230 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e203aba-b679-45b3-9987-8a63bdb556db-logs\") pod \"1e203aba-b679-45b3-9987-8a63bdb556db\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.005320 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-config-data\") pod \"1e203aba-b679-45b3-9987-8a63bdb556db\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.008295 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e203aba-b679-45b3-9987-8a63bdb556db-logs" (OuterVolumeSpecName: "logs") pod "1e203aba-b679-45b3-9987-8a63bdb556db" (UID: "1e203aba-b679-45b3-9987-8a63bdb556db"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.013652 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1e203aba-b679-45b3-9987-8a63bdb556db-etc-machine-id\") pod \"1e203aba-b679-45b3-9987-8a63bdb556db\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.013703 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-config-data-custom\") pod \"1e203aba-b679-45b3-9987-8a63bdb556db\" (UID: \"1e203aba-b679-45b3-9987-8a63bdb556db\") " Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.015524 4877 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e203aba-b679-45b3-9987-8a63bdb556db-logs\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.015592 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1e203aba-b679-45b3-9987-8a63bdb556db-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "1e203aba-b679-45b3-9987-8a63bdb556db" (UID: "1e203aba-b679-45b3-9987-8a63bdb556db"). 
InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.016401 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e203aba-b679-45b3-9987-8a63bdb556db-kube-api-access-vp7d8" (OuterVolumeSpecName: "kube-api-access-vp7d8") pod "1e203aba-b679-45b3-9987-8a63bdb556db" (UID: "1e203aba-b679-45b3-9987-8a63bdb556db"). InnerVolumeSpecName "kube-api-access-vp7d8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.022766 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-scripts" (OuterVolumeSpecName: "scripts") pod "1e203aba-b679-45b3-9987-8a63bdb556db" (UID: "1e203aba-b679-45b3-9987-8a63bdb556db"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.036951 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.049924 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1e203aba-b679-45b3-9987-8a63bdb556db" (UID: "1e203aba-b679-45b3-9987-8a63bdb556db"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.092274 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1e203aba-b679-45b3-9987-8a63bdb556db" (UID: "1e203aba-b679-45b3-9987-8a63bdb556db"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.114439 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhmdh" event={"ID":"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728","Type":"ContainerStarted","Data":"d248fcbbb3147752bf0c9186a91839847b88f7c55a76496336a5799a51ab8bf2"} Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.117855 4877 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1e203aba-b679-45b3-9987-8a63bdb556db-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.117925 4877 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.117936 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.117945 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.117954 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vp7d8\" (UniqueName: \"kubernetes.io/projected/1e203aba-b679-45b3-9987-8a63bdb556db-kube-api-access-vp7d8\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.122457 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-759d866587-ps7h5" event={"ID":"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb","Type":"ContainerStarted","Data":"9b7fdea0232027f4582133767532a274601356e3896bf47a343f5c3ed10193c2"} Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.138408 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-66df9b7b7d-nkmrc" event={"ID":"c6e0515b-5a47-473c-859a-8cbc2f02d959","Type":"ContainerStarted","Data":"a0c9028679cc2730e135fb337f6b72dac0ded0eec36425c946bdd4afea3d9fc5"} Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.146039 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-55d5d56894-j6z8h" event={"ID":"0d4944b1-cca6-4ace-9334-12dd2b981be8","Type":"ContainerStarted","Data":"31a6fcc2fb4b11bc5c560aa012f970c9270eb10f9b07b199b30081e65988a1e0"} Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.147628 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6db6c6788c-7mlr2" event={"ID":"2c19f87c-83c1-4c2d-97fc-f50a89480878","Type":"ContainerStarted","Data":"f1d62a1a91ba9ce032b18b304e4f36fcd9448b5553089de679060e128200b3d2"} Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.150968 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.151067 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.151744 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" event={"ID":"4de40718-80f2-48ae-8929-32a2c0e96707","Type":"ContainerStarted","Data":"5c671452fdabb2e000fbd49eff11431519c79159b5bfcd0d1eeae8b3995ec670"} Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.153370 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.153432 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.219088 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-scripts\") pod \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.219181 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54a9533f-7d16-4422-ac68-5ff6e34ddf39-logs\") pod \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.219344 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/54a9533f-7d16-4422-ac68-5ff6e34ddf39-httpd-run\") pod \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.219374 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-combined-ca-bundle\") pod \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.219438 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-public-tls-certs\") pod \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.220704 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") pod \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.220753 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5hjm\" (UniqueName: \"kubernetes.io/projected/54a9533f-7d16-4422-ac68-5ff6e34ddf39-kube-api-access-j5hjm\") pod \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.220793 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-config-data\") pod \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\" (UID: \"54a9533f-7d16-4422-ac68-5ff6e34ddf39\") " Jan 28 17:01:59 crc 
kubenswrapper[4877]: I0128 17:01:59.221182 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-config-data" (OuterVolumeSpecName: "config-data") pod "1e203aba-b679-45b3-9987-8a63bdb556db" (UID: "1e203aba-b679-45b3-9987-8a63bdb556db"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.221864 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e203aba-b679-45b3-9987-8a63bdb556db-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.223232 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54a9533f-7d16-4422-ac68-5ff6e34ddf39-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "54a9533f-7d16-4422-ac68-5ff6e34ddf39" (UID: "54a9533f-7d16-4422-ac68-5ff6e34ddf39"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.223575 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54a9533f-7d16-4422-ac68-5ff6e34ddf39-logs" (OuterVolumeSpecName: "logs") pod "54a9533f-7d16-4422-ac68-5ff6e34ddf39" (UID: "54a9533f-7d16-4422-ac68-5ff6e34ddf39"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.246026 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-scripts" (OuterVolumeSpecName: "scripts") pod "54a9533f-7d16-4422-ac68-5ff6e34ddf39" (UID: "54a9533f-7d16-4422-ac68-5ff6e34ddf39"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.247179 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54a9533f-7d16-4422-ac68-5ff6e34ddf39-kube-api-access-j5hjm" (OuterVolumeSpecName: "kube-api-access-j5hjm") pod "54a9533f-7d16-4422-ac68-5ff6e34ddf39" (UID: "54a9533f-7d16-4422-ac68-5ff6e34ddf39"). InnerVolumeSpecName "kube-api-access-j5hjm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.324837 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5hjm\" (UniqueName: \"kubernetes.io/projected/54a9533f-7d16-4422-ac68-5ff6e34ddf39-kube-api-access-j5hjm\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.324882 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.324898 4877 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54a9533f-7d16-4422-ac68-5ff6e34ddf39-logs\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.324909 4877 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/54a9533f-7d16-4422-ac68-5ff6e34ddf39-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.421424 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0" (OuterVolumeSpecName: "glance") pod "54a9533f-7d16-4422-ac68-5ff6e34ddf39" (UID: "54a9533f-7d16-4422-ac68-5ff6e34ddf39"). InnerVolumeSpecName "pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.428550 4877 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") on node \"crc\" " Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.765763 4877 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.766385 4877 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0") on node "crc" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.839449 4877 reconciler_common.go:293] "Volume detached for volume \"pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.914828 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "54a9533f-7d16-4422-ac68-5ff6e34ddf39" (UID: "54a9533f-7d16-4422-ac68-5ff6e34ddf39"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.966757 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:01:59 crc kubenswrapper[4877]: I0128 17:01:59.988694 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-config-data" (OuterVolumeSpecName: "config-data") pod "54a9533f-7d16-4422-ac68-5ff6e34ddf39" (UID: "54a9533f-7d16-4422-ac68-5ff6e34ddf39"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.017693 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "54a9533f-7d16-4422-ac68-5ff6e34ddf39" (UID: "54a9533f-7d16-4422-ac68-5ff6e34ddf39"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.069346 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.069382 4877 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/54a9533f-7d16-4422-ac68-5ff6e34ddf39-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.169340 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6db6c6788c-7mlr2" event={"ID":"2c19f87c-83c1-4c2d-97fc-f50a89480878","Type":"ContainerStarted","Data":"990671ccc00cb725c3d8ea3939e354396e2682907439506425454d8e76f63bce"} Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.169867 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-6c96dc67d-f972b" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.169931 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.169995 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" event={"ID":"4de40718-80f2-48ae-8929-32a2c0e96707","Type":"ContainerStarted","Data":"aa25a1b0108db58da8462e80478184b510b2f77a94185ea97365c7ae28118e8e"} Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.170351 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.172593 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-66df9b7b7d-nkmrc" event={"ID":"c6e0515b-5a47-473c-859a-8cbc2f02d959","Type":"ContainerStarted","Data":"c81ae7b288101b7675dca1b535598910208cb29eacef6a65a7cfd6460ff87669"} Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.172716 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.175382 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-759d866587-ps7h5" 
event={"ID":"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb","Type":"ContainerStarted","Data":"f92c9d65a444b3b7245b0b3547044751868343c3e441bcaec87c5ba38f246655"} Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.175505 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.177777 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-66b66545b5-ldnrl" event={"ID":"db1f5546-0eed-4bf4-bd25-065718c91a46","Type":"ContainerStarted","Data":"9221b44fad16ae9be88c646cb760d9c38358bb8592f689a92da95e8627118c98"} Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.177834 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.177887 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-66b66545b5-ldnrl" podUID="db1f5546-0eed-4bf4-bd25-065718c91a46" containerName="heat-api" containerID="cri-o://9221b44fad16ae9be88c646cb760d9c38358bb8592f689a92da95e8627118c98" gracePeriod=60 Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.191129 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-55d5d56894-j6z8h" event={"ID":"0d4944b1-cca6-4ace-9334-12dd2b981be8","Type":"ContainerStarted","Data":"cf699f9aa10797ffded4b4e11d2ea90d808ded4cf4d997d253668fb325be3b64"} Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.191682 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.198928 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" event={"ID":"8355c6a7-af56-4b68-bd65-560a99273480","Type":"ContainerStarted","Data":"27a6b7d4000d41610f5d798276ad6c12b77825333cc2be8a645d4374f57c34d8"} Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.199041 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.199081 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" podUID="8355c6a7-af56-4b68-bd65-560a99273480" containerName="heat-cfnapi" containerID="cri-o://27a6b7d4000d41610f5d798276ad6c12b77825333cc2be8a645d4374f57c34d8" gracePeriod=60 Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.203082 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6d5767f7f9-8pfmz" event={"ID":"f8dea33a-3c8f-43eb-af20-df530ec7a89d","Type":"ContainerStarted","Data":"ade098d032d08a508cf4dbcaeea95d338b9065d42a33b376c48a2146f81cd575"} Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.204062 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.229615 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb","Type":"ContainerStarted","Data":"dcd49f3dfa8ab48f70a26751cd6ce2d27dc33a551ceedb57b44c8724f4f67998"} Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.249341 4877 generic.go:334] "Generic (PLEG): container finished" podID="daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" containerID="4953d8b85023ef2a802914c168a43bae53d5578d1d0c189b29f2647874eaa721" exitCode=0 
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.250579 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhmdh" event={"ID":"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728","Type":"ContainerDied","Data":"4953d8b85023ef2a802914c168a43bae53d5578d1d0c189b29f2647874eaa721"}
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.250969 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6db6c6788c-7mlr2" podStartSLOduration=10.250946505 podStartE2EDuration="10.250946505s" podCreationTimestamp="2026-01-28 17:01:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:02:00.200577928 +0000 UTC m=+1623.758904816" watchObservedRunningTime="2026-01-28 17:02:00.250946505 +0000 UTC m=+1623.809273393"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.262041 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-759d866587-ps7h5" podStartSLOduration=5.262024584 podStartE2EDuration="5.262024584s" podCreationTimestamp="2026-01-28 17:01:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:02:00.256720631 +0000 UTC m=+1623.815047539" watchObservedRunningTime="2026-01-28 17:02:00.262024584 +0000 UTC m=+1623.820351472"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.324799 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-66b66545b5-ldnrl" podStartSLOduration=12.473199565 podStartE2EDuration="21.324781986s" podCreationTimestamp="2026-01-28 17:01:39 +0000 UTC" firstStartedPulling="2026-01-28 17:01:47.532714995 +0000 UTC m=+1611.091041883" lastFinishedPulling="2026-01-28 17:01:56.384297416 +0000 UTC m=+1619.942624304" observedRunningTime="2026-01-28 17:02:00.281960981 +0000 UTC m=+1623.840287879" watchObservedRunningTime="2026-01-28 17:02:00.324781986 +0000 UTC m=+1623.883108874"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.367533 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.396616 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"]
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.423975 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-55d5d56894-j6z8h" podStartSLOduration=10.423954988 podStartE2EDuration="10.423954988s" podCreationTimestamp="2026-01-28 17:01:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:02:00.344451656 +0000 UTC m=+1623.902778544" watchObservedRunningTime="2026-01-28 17:02:00.423954988 +0000 UTC m=+1623.982281876"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.432622 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Jan 28 17:02:00 crc kubenswrapper[4877]: E0128 17:02:00.433206 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19c75c24-13dd-439a-9245-c02f9e6d8ec7" containerName="mariadb-database-create"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433223 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="19c75c24-13dd-439a-9245-c02f9e6d8ec7" containerName="mariadb-database-create"
Jan 28 17:02:00 crc kubenswrapper[4877]: E0128 17:02:00.433247 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e203aba-b679-45b3-9987-8a63bdb556db" containerName="cinder-api-log"
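Editor's note: the pod_startup_latency_tracker.go:104 records carry two durations: podStartE2EDuration (pod creation to observed running) and podStartSLOduration (the same minus image-pull time, the figure compared against the startup SLO). For heat-api-66b66545b5-ldnrl the pull window 17:01:47.53 to 17:01:56.38 is about 8.85s, and indeed 21.32s minus 12.47s is about 8.85s; pods whose pull timestamps are the zero time ("0001-01-01 ...") needed no pull, so both durations match. A sketch that derives pull time from these records (regex inferred from the format above):

import re

LATENCY_RE = re.compile(
    r'pod="(?P<pod>[^"]+)" podStartSLOduration=(?P<slo>[\d.]+) '
    r'podStartE2EDuration="(?P<e2e>[\d.]+)s"'
)

def pull_seconds(lines):
    """Approximate image-pull time per pod as E2E minus SLO duration."""
    for line in lines:
        m = LATENCY_RE.search(line)
        if m:
            yield m.group("pod"), float(m.group("e2e")) - float(m.group("slo"))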
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433255 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e203aba-b679-45b3-9987-8a63bdb556db" containerName="cinder-api-log"
Jan 28 17:02:00 crc kubenswrapper[4877]: E0128 17:02:00.433269 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5" containerName="mariadb-account-create-update"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433277 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5" containerName="mariadb-account-create-update"
Jan 28 17:02:00 crc kubenswrapper[4877]: E0128 17:02:00.433310 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11c0eeb0-1466-4faf-ae33-e74028802131" containerName="mariadb-account-create-update"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433318 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="11c0eeb0-1466-4faf-ae33-e74028802131" containerName="mariadb-account-create-update"
Jan 28 17:02:00 crc kubenswrapper[4877]: E0128 17:02:00.433332 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59e30121-a8ea-4d16-8b90-659a6158def9" containerName="mariadb-account-create-update"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433339 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="59e30121-a8ea-4d16-8b90-659a6158def9" containerName="mariadb-account-create-update"
Jan 28 17:02:00 crc kubenswrapper[4877]: E0128 17:02:00.433350 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7749a4b7-d909-4b9a-ae04-acfcf29f916c" containerName="mariadb-database-create"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433356 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="7749a4b7-d909-4b9a-ae04-acfcf29f916c" containerName="mariadb-database-create"
Jan 28 17:02:00 crc kubenswrapper[4877]: E0128 17:02:00.433365 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc2f618a-c56e-4c06-a365-be3073f2c2ae" containerName="init"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433370 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc2f618a-c56e-4c06-a365-be3073f2c2ae" containerName="init"
Jan 28 17:02:00 crc kubenswrapper[4877]: E0128 17:02:00.433395 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54a9533f-7d16-4422-ac68-5ff6e34ddf39" containerName="glance-log"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433401 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="54a9533f-7d16-4422-ac68-5ff6e34ddf39" containerName="glance-log"
Jan 28 17:02:00 crc kubenswrapper[4877]: E0128 17:02:00.433415 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="764294ed-9715-4d81-b7b6-50a4104630fd" containerName="mariadb-database-create"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433423 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="764294ed-9715-4d81-b7b6-50a4104630fd" containerName="mariadb-database-create"
Jan 28 17:02:00 crc kubenswrapper[4877]: E0128 17:02:00.433434 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54a9533f-7d16-4422-ac68-5ff6e34ddf39" containerName="glance-httpd"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433440 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="54a9533f-7d16-4422-ac68-5ff6e34ddf39" containerName="glance-httpd"
Jan 28 17:02:00 crc kubenswrapper[4877]: E0128 17:02:00.433451 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e203aba-b679-45b3-9987-8a63bdb556db" containerName="cinder-api"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433456 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e203aba-b679-45b3-9987-8a63bdb556db" containerName="cinder-api"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433675 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="19c75c24-13dd-439a-9245-c02f9e6d8ec7" containerName="mariadb-database-create"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433689 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="11c0eeb0-1466-4faf-ae33-e74028802131" containerName="mariadb-account-create-update"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433702 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="54a9533f-7d16-4422-ac68-5ff6e34ddf39" containerName="glance-httpd"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433712 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="764294ed-9715-4d81-b7b6-50a4104630fd" containerName="mariadb-database-create"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433722 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5" containerName="mariadb-account-create-update"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433731 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc2f618a-c56e-4c06-a365-be3073f2c2ae" containerName="init"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433742 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="7749a4b7-d909-4b9a-ae04-acfcf29f916c" containerName="mariadb-database-create"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433751 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e203aba-b679-45b3-9987-8a63bdb556db" containerName="cinder-api"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433766 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="59e30121-a8ea-4d16-8b90-659a6158def9" containerName="mariadb-account-create-update"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433778 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e203aba-b679-45b3-9987-8a63bdb556db" containerName="cinder-api-log"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.433788 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="54a9533f-7d16-4422-ac68-5ff6e34ddf39" containerName="glance-log"
Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.435310 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Need to start a new one" pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.439263 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.439583 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.440238 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.480536 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.483986 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" podStartSLOduration=10.483965185 podStartE2EDuration="10.483965185s" podCreationTimestamp="2026-01-28 17:01:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:02:00.379725626 +0000 UTC m=+1623.938052514" watchObservedRunningTime="2026-01-28 17:02:00.483965185 +0000 UTC m=+1624.042292063" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.584981 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" podStartSLOduration=12.733407137 podStartE2EDuration="21.584956327s" podCreationTimestamp="2026-01-28 17:01:39 +0000 UTC" firstStartedPulling="2026-01-28 17:01:47.353216586 +0000 UTC m=+1610.911543474" lastFinishedPulling="2026-01-28 17:01:56.204765776 +0000 UTC m=+1619.763092664" observedRunningTime="2026-01-28 17:02:00.417288299 +0000 UTC m=+1623.975615197" watchObservedRunningTime="2026-01-28 17:02:00.584956327 +0000 UTC m=+1624.143283215" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.587407 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.587776 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb414829-821c-4e0e-b099-26253c6a538a-logs\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.587895 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-scripts\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.588042 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2wbg\" (UniqueName: \"kubernetes.io/projected/fb414829-821c-4e0e-b099-26253c6a538a-kube-api-access-x2wbg\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.588176 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fb414829-821c-4e0e-b099-26253c6a538a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.594667 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.594783 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.594886 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-config-data\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.595006 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-config-data-custom\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.671663 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-66df9b7b7d-nkmrc" podStartSLOduration=5.671637753 podStartE2EDuration="5.671637753s" podCreationTimestamp="2026-01-28 17:01:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:02:00.439033134 +0000 UTC m=+1623.997360032" watchObservedRunningTime="2026-01-28 17:02:00.671637753 +0000 UTC m=+1624.229964641" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.698883 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.698951 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.699003 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-config-data\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.699059 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-config-data-custom\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.699098 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.699118 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb414829-821c-4e0e-b099-26253c6a538a-logs\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.699131 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-scripts\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.699152 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2wbg\" (UniqueName: \"kubernetes.io/projected/fb414829-821c-4e0e-b099-26253c6a538a-kube-api-access-x2wbg\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.699178 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fb414829-821c-4e0e-b099-26253c6a538a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.699271 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fb414829-821c-4e0e-b099-26253c6a538a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.701990 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb414829-821c-4e0e-b099-26253c6a538a-logs\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.720256 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-scripts\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.733559 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-config-data\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.737043 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-public-tls-certs\") pod 
\"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.741788 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.744128 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-config-data-custom\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.744200 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-86b8867947-jkp8w" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.747101 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2wbg\" (UniqueName: \"kubernetes.io/projected/fb414829-821c-4e0e-b099-26253c6a538a-kube-api-access-x2wbg\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.753140 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb414829-821c-4e0e-b099-26253c6a538a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"fb414829-821c-4e0e-b099-26253c6a538a\") " pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.764880 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.768128 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.805071 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.906689 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.909105 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.920808 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.920999 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 28 17:02:00 crc kubenswrapper[4877]: I0128 17:02:00.987025 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.000453 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.000894 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-6d5767f7f9-8pfmz" podStartSLOduration=11.000868767 podStartE2EDuration="11.000868767s" podCreationTimestamp="2026-01-28 17:01:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:02:00.581213007 +0000 UTC m=+1624.139539905" watchObservedRunningTime="2026-01-28 17:02:01.000868767 +0000 UTC m=+1624.559195655" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.063391 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/368f487a-75ec-44d5-94a2-807b0a152c5d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.063454 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/368f487a-75ec-44d5-94a2-807b0a152c5d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.063520 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/368f487a-75ec-44d5-94a2-807b0a152c5d-config-data\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.063657 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.063702 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pb79v\" (UniqueName: \"kubernetes.io/projected/368f487a-75ec-44d5-94a2-807b0a152c5d-kube-api-access-pb79v\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.063756 4877 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/368f487a-75ec-44d5-94a2-807b0a152c5d-scripts\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.063790 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/368f487a-75ec-44d5-94a2-807b0a152c5d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.063862 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/368f487a-75ec-44d5-94a2-807b0a152c5d-logs\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.173085 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.184533 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pb79v\" (UniqueName: \"kubernetes.io/projected/368f487a-75ec-44d5-94a2-807b0a152c5d-kube-api-access-pb79v\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.184693 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/368f487a-75ec-44d5-94a2-807b0a152c5d-scripts\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.184739 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/368f487a-75ec-44d5-94a2-807b0a152c5d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.184884 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/368f487a-75ec-44d5-94a2-807b0a152c5d-logs\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.185041 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/368f487a-75ec-44d5-94a2-807b0a152c5d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.185074 4877 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/368f487a-75ec-44d5-94a2-807b0a152c5d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.185155 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/368f487a-75ec-44d5-94a2-807b0a152c5d-config-data\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.185160 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.185196 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/5992022521d9a8a870a78abda3cb3974fc8658c66623b3cbfa6fe8c84dc59df6/globalmount\"" pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.176567 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-69c986f6d7-bls2q"] Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.185738 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-69c986f6d7-bls2q" podUID="55414a13-6cc8-42cb-bb48-610740b92289" containerName="dnsmasq-dns" containerID="cri-o://ce4ec7bcf353c45f6950bb0d9ef9f7033b93e11f9430a850d977be89392f4b7f" gracePeriod=10 Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.185748 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/368f487a-75ec-44d5-94a2-807b0a152c5d-logs\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.201126 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/368f487a-75ec-44d5-94a2-807b0a152c5d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.205940 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/368f487a-75ec-44d5-94a2-807b0a152c5d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.210124 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/368f487a-75ec-44d5-94a2-807b0a152c5d-scripts\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 
17:02:01.212689 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/368f487a-75ec-44d5-94a2-807b0a152c5d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.222510 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pb79v\" (UniqueName: \"kubernetes.io/projected/368f487a-75ec-44d5-94a2-807b0a152c5d-kube-api-access-pb79v\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.226775 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/368f487a-75ec-44d5-94a2-807b0a152c5d-config-data\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.281962 4877 generic.go:334] "Generic (PLEG): container finished" podID="0d4944b1-cca6-4ace-9334-12dd2b981be8" containerID="cf699f9aa10797ffded4b4e11d2ea90d808ded4cf4d997d253668fb325be3b64" exitCode=1 Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.282030 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-55d5d56894-j6z8h" event={"ID":"0d4944b1-cca6-4ace-9334-12dd2b981be8","Type":"ContainerDied","Data":"cf699f9aa10797ffded4b4e11d2ea90d808ded4cf4d997d253668fb325be3b64"} Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.282909 4877 scope.go:117] "RemoveContainer" containerID="cf699f9aa10797ffded4b4e11d2ea90d808ded4cf4d997d253668fb325be3b64" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.312458 4877 generic.go:334] "Generic (PLEG): container finished" podID="4de40718-80f2-48ae-8929-32a2c0e96707" containerID="aa25a1b0108db58da8462e80478184b510b2f77a94185ea97365c7ae28118e8e" exitCode=1 Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.314191 4877 scope.go:117] "RemoveContainer" containerID="aa25a1b0108db58da8462e80478184b510b2f77a94185ea97365c7ae28118e8e" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.314894 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" event={"ID":"4de40718-80f2-48ae-8929-32a2c0e96707","Type":"ContainerDied","Data":"aa25a1b0108db58da8462e80478184b510b2f77a94185ea97365c7ae28118e8e"} Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.343543 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:02:01 crc kubenswrapper[4877]: E0128 17:02:01.343941 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.359497 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\" (UniqueName: 
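Editor's note: the new heat-api and heat-cfnapi containers exit with code 1 almost immediately after starting (their liveness probes report unhealthy a second later, below), so the kubelet removes the dead containers and will restart them with backoff; machine-config-daemon is already at the cap, hence the CrashLoopBackOff "back-off 5m0s restarting failed container" sync error. A sketch surfacing both signals from records like these (patterns inferred from this log):

import re

FINISHED_RE = re.compile(
    r'container finished.*containerID="(?P<cid>[^"]+)" exitCode=(?P<code>-?\d+)'
)
BACKOFF_RE = re.compile(
    r'back-off (?P<delay>\S+) restarting failed container=(?P<name>\S+) pod=(?P<pod>\S+?)\('
)

def crash_signals(lines):
    """Yield non-zero container exits and CrashLoopBackOff notices."""
    for line in lines:
        m = FINISHED_RE.search(line)
        if m and m.group("code") != "0":
            yield ("exit", m.group("cid")[:12], m.group("code"))
        b = BACKOFF_RE.search(line)
        if b:
            yield ("backoff", b.group("name"), b.group("delay"))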
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f953cabc-8af9-4207-8fbb-0d3e62c6d2e0\") pod \"glance-default-external-api-0\" (UID: \"368f487a-75ec-44d5-94a2-807b0a152c5d\") " pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.408165 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e203aba-b679-45b3-9987-8a63bdb556db" path="/var/lib/kubelet/pods/1e203aba-b679-45b3-9987-8a63bdb556db/volumes" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.409174 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54a9533f-7d16-4422-ac68-5ff6e34ddf39" path="/var/lib/kubelet/pods/54a9533f-7d16-4422-ac68-5ff6e34ddf39/volumes" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.419030 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.650853 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 28 17:02:01 crc kubenswrapper[4877]: I0128 17:02:01.789147 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 28 17:02:02 crc kubenswrapper[4877]: I0128 17:02:02.097595 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:02:02 crc kubenswrapper[4877]: I0128 17:02:02.113590 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:02:02 crc kubenswrapper[4877]: I0128 17:02:02.359837 4877 generic.go:334] "Generic (PLEG): container finished" podID="daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" containerID="297daf0a98f80b9e82b35c6a4a9e969836d75aee6576bf4d6a376742366797b1" exitCode=0 Jan 28 17:02:02 crc kubenswrapper[4877]: I0128 17:02:02.359952 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhmdh" event={"ID":"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728","Type":"ContainerDied","Data":"297daf0a98f80b9e82b35c6a4a9e969836d75aee6576bf4d6a376742366797b1"} Jan 28 17:02:02 crc kubenswrapper[4877]: I0128 17:02:02.366088 4877 generic.go:334] "Generic (PLEG): container finished" podID="55414a13-6cc8-42cb-bb48-610740b92289" containerID="ce4ec7bcf353c45f6950bb0d9ef9f7033b93e11f9430a850d977be89392f4b7f" exitCode=0 Jan 28 17:02:02 crc kubenswrapper[4877]: I0128 17:02:02.366167 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69c986f6d7-bls2q" event={"ID":"55414a13-6cc8-42cb-bb48-610740b92289","Type":"ContainerDied","Data":"ce4ec7bcf353c45f6950bb0d9ef9f7033b93e11f9430a850d977be89392f4b7f"} Jan 28 17:02:02 crc kubenswrapper[4877]: I0128 17:02:02.366193 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69c986f6d7-bls2q" event={"ID":"55414a13-6cc8-42cb-bb48-610740b92289","Type":"ContainerDied","Data":"e21f6db4efbc1e331291924b4b319920e096b39267594c9ddda602eadc839770"} Jan 28 17:02:02 crc kubenswrapper[4877]: I0128 17:02:02.366205 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e21f6db4efbc1e331291924b4b319920e096b39267594c9ddda602eadc839770" Jan 28 17:02:02 crc kubenswrapper[4877]: I0128 17:02:02.371123 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"fb414829-821c-4e0e-b099-26253c6a538a","Type":"ContainerStarted","Data":"69e6f329cf02781aa0cc93eeefc7a2ef2aa620893e3689756e0e7ac2dc582c04"} Jan 28 17:02:02 crc kubenswrapper[4877]: I0128 17:02:02.638268 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 17:02:02 crc kubenswrapper[4877]: I0128 17:02:02.850271 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-j4pm4"] Jan 28 17:02:02 crc kubenswrapper[4877]: I0128 17:02:02.852071 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-j4pm4" Jan 28 17:02:02 crc kubenswrapper[4877]: I0128 17:02:02.854242 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-ktvl9" Jan 28 17:02:02 crc kubenswrapper[4877]: I0128 17:02:02.854551 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 28 17:02:02 crc kubenswrapper[4877]: I0128 17:02:02.874634 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 28 17:02:02 crc kubenswrapper[4877]: I0128 17:02:02.916117 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-j4pm4"] Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:02.995898 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="1e203aba-b679-45b3-9987-8a63bdb556db" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.203:8776/healthcheck\": dial tcp 10.217.0.203:8776: i/o timeout (Client.Timeout exceeded while awaiting headers)" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.053813 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-scripts\") pod \"nova-cell0-conductor-db-sync-j4pm4\" (UID: \"d0456757-a0e3-42a7-900f-422828fe9836\") " pod="openstack/nova-cell0-conductor-db-sync-j4pm4" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.053926 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-j4pm4\" (UID: \"d0456757-a0e3-42a7-900f-422828fe9836\") " pod="openstack/nova-cell0-conductor-db-sync-j4pm4" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.053989 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gth75\" (UniqueName: \"kubernetes.io/projected/d0456757-a0e3-42a7-900f-422828fe9836-kube-api-access-gth75\") pod \"nova-cell0-conductor-db-sync-j4pm4\" (UID: \"d0456757-a0e3-42a7-900f-422828fe9836\") " pod="openstack/nova-cell0-conductor-db-sync-j4pm4" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.054041 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-config-data\") pod \"nova-cell0-conductor-db-sync-j4pm4\" (UID: \"d0456757-a0e3-42a7-900f-422828fe9836\") " pod="openstack/nova-cell0-conductor-db-sync-j4pm4" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.156277 4877 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-scripts\") pod \"nova-cell0-conductor-db-sync-j4pm4\" (UID: \"d0456757-a0e3-42a7-900f-422828fe9836\") " pod="openstack/nova-cell0-conductor-db-sync-j4pm4" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.156382 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-j4pm4\" (UID: \"d0456757-a0e3-42a7-900f-422828fe9836\") " pod="openstack/nova-cell0-conductor-db-sync-j4pm4" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.156444 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gth75\" (UniqueName: \"kubernetes.io/projected/d0456757-a0e3-42a7-900f-422828fe9836-kube-api-access-gth75\") pod \"nova-cell0-conductor-db-sync-j4pm4\" (UID: \"d0456757-a0e3-42a7-900f-422828fe9836\") " pod="openstack/nova-cell0-conductor-db-sync-j4pm4" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.156497 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-config-data\") pod \"nova-cell0-conductor-db-sync-j4pm4\" (UID: \"d0456757-a0e3-42a7-900f-422828fe9836\") " pod="openstack/nova-cell0-conductor-db-sync-j4pm4" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.190772 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-scripts\") pod \"nova-cell0-conductor-db-sync-j4pm4\" (UID: \"d0456757-a0e3-42a7-900f-422828fe9836\") " pod="openstack/nova-cell0-conductor-db-sync-j4pm4" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.191423 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-config-data\") pod \"nova-cell0-conductor-db-sync-j4pm4\" (UID: \"d0456757-a0e3-42a7-900f-422828fe9836\") " pod="openstack/nova-cell0-conductor-db-sync-j4pm4" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.216310 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-j4pm4\" (UID: \"d0456757-a0e3-42a7-900f-422828fe9836\") " pod="openstack/nova-cell0-conductor-db-sync-j4pm4" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.221112 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gth75\" (UniqueName: \"kubernetes.io/projected/d0456757-a0e3-42a7-900f-422828fe9836-kube-api-access-gth75\") pod \"nova-cell0-conductor-db-sync-j4pm4\" (UID: \"d0456757-a0e3-42a7-900f-422828fe9836\") " pod="openstack/nova-cell0-conductor-db-sync-j4pm4" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.341972 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-69c986f6d7-bls2q" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.365324 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-dns-swift-storage-0\") pod \"55414a13-6cc8-42cb-bb48-610740b92289\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.365512 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-ovsdbserver-sb\") pod \"55414a13-6cc8-42cb-bb48-610740b92289\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.365611 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-ovsdbserver-nb\") pod \"55414a13-6cc8-42cb-bb48-610740b92289\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.365696 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-config\") pod \"55414a13-6cc8-42cb-bb48-610740b92289\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.365731 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fnksq\" (UniqueName: \"kubernetes.io/projected/55414a13-6cc8-42cb-bb48-610740b92289-kube-api-access-fnksq\") pod \"55414a13-6cc8-42cb-bb48-610740b92289\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.365758 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-dns-svc\") pod \"55414a13-6cc8-42cb-bb48-610740b92289\" (UID: \"55414a13-6cc8-42cb-bb48-610740b92289\") " Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.396429 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55414a13-6cc8-42cb-bb48-610740b92289-kube-api-access-fnksq" (OuterVolumeSpecName: "kube-api-access-fnksq") pod "55414a13-6cc8-42cb-bb48-610740b92289" (UID: "55414a13-6cc8-42cb-bb48-610740b92289"). InnerVolumeSpecName "kube-api-access-fnksq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:02:03 crc kubenswrapper[4877]: E0128 17:02:03.425629 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddaa3b3c0_3574_4ee2_a1ed_6ebe4ab96728.slice/crio-conmon-297daf0a98f80b9e82b35c6a4a9e969836d75aee6576bf4d6a376742366797b1.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddaa3b3c0_3574_4ee2_a1ed_6ebe4ab96728.slice/crio-297daf0a98f80b9e82b35c6a4a9e969836d75aee6576bf4d6a376742366797b1.scope\": RecentStats: unable to find data in memory cache]" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.444851 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"368f487a-75ec-44d5-94a2-807b0a152c5d","Type":"ContainerStarted","Data":"bbf3c9697c0824e70d01f7db8f78f7a2286c54dc2e98fcb4da053c0c4d6b0581"} Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.449744 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-55d5d56894-j6z8h" event={"ID":"0d4944b1-cca6-4ace-9334-12dd2b981be8","Type":"ContainerStarted","Data":"17704eafeb2a43ead19f7f794704f36f4ebad9eb484cf6e3fccdecbbccfc6428"} Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.451331 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.467919 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fnksq\" (UniqueName: \"kubernetes.io/projected/55414a13-6cc8-42cb-bb48-610740b92289-kube-api-access-fnksq\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.521768 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69c986f6d7-bls2q" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.523341 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" event={"ID":"4de40718-80f2-48ae-8929-32a2c0e96707","Type":"ContainerStarted","Data":"6060488dbff87400bada19b38a6cdf3f0320f043c26cf76910b39d6cbb32382f"} Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.523906 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.602609 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-config" (OuterVolumeSpecName: "config") pod "55414a13-6cc8-42cb-bb48-610740b92289" (UID: "55414a13-6cc8-42cb-bb48-610740b92289"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.620884 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-j4pm4" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.675421 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-config\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.888722 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "55414a13-6cc8-42cb-bb48-610740b92289" (UID: "55414a13-6cc8-42cb-bb48-610740b92289"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:02:03 crc kubenswrapper[4877]: I0128 17:02:03.982724 4877 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.167705 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "55414a13-6cc8-42cb-bb48-610740b92289" (UID: "55414a13-6cc8-42cb-bb48-610740b92289"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.203538 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.249829 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "55414a13-6cc8-42cb-bb48-610740b92289" (UID: "55414a13-6cc8-42cb-bb48-610740b92289"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.257136 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "55414a13-6cc8-42cb-bb48-610740b92289" (UID: "55414a13-6cc8-42cb-bb48-610740b92289"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.305357 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.305397 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/55414a13-6cc8-42cb-bb48-610740b92289-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.320366 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-j4pm4"] Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.583248 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb","Type":"ContainerStarted","Data":"1fddee63d2990da9c63b575f83c718fea9ee88781d6acd92b9ad88b8616fb1cd"} Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.583448 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="ceilometer-central-agent" containerID="cri-o://85e30d408e8e56325c43bfb76d65067668f2789838cbcc8a62569b11423b16df" gracePeriod=30 Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.583577 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="proxy-httpd" containerID="cri-o://1fddee63d2990da9c63b575f83c718fea9ee88781d6acd92b9ad88b8616fb1cd" gracePeriod=30 Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.583612 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="sg-core" containerID="cri-o://dcd49f3dfa8ab48f70a26751cd6ce2d27dc33a551ceedb57b44c8724f4f67998" gracePeriod=30 Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.583628 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.583642 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="ceilometer-notification-agent" containerID="cri-o://e797f444d22b70e52dd0bab83d1d5e39fbf174e7de12276164ab230b18f208ae" gracePeriod=30 Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.586701 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-69c986f6d7-bls2q"] Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.600357 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhmdh" event={"ID":"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728","Type":"ContainerStarted","Data":"0dadce2063315347cc4d328890567815d297c6be4d1ca8ca2842098fefc111a1"} Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.606382 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-69c986f6d7-bls2q"] Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.621697 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"fb414829-821c-4e0e-b099-26253c6a538a","Type":"ContainerStarted","Data":"707611436e016aee3daf34ae51071da1fc944efcc0d6c02bbdce901020784d57"} Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.629963 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=5.714168755 podStartE2EDuration="18.629938539s" podCreationTimestamp="2026-01-28 17:01:46 +0000 UTC" firstStartedPulling="2026-01-28 17:01:48.682785642 +0000 UTC m=+1612.241112530" lastFinishedPulling="2026-01-28 17:02:01.598555426 +0000 UTC m=+1625.156882314" observedRunningTime="2026-01-28 17:02:04.615027327 +0000 UTC m=+1628.173354215" watchObservedRunningTime="2026-01-28 17:02:04.629938539 +0000 UTC m=+1628.188265427" Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.642216 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-j4pm4" event={"ID":"d0456757-a0e3-42a7-900f-422828fe9836","Type":"ContainerStarted","Data":"9777ec0abacef928cf378ec1fc8f334ecad0a93e3fb0376f086035fed6788e6e"} Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.644692 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hhmdh" podStartSLOduration=8.560623554 podStartE2EDuration="11.644670456s" podCreationTimestamp="2026-01-28 17:01:53 +0000 UTC" firstStartedPulling="2026-01-28 17:02:00.255739854 +0000 UTC m=+1623.814066752" lastFinishedPulling="2026-01-28 17:02:03.339786766 +0000 UTC m=+1626.898113654" observedRunningTime="2026-01-28 17:02:04.638257113 +0000 UTC m=+1628.196584001" watchObservedRunningTime="2026-01-28 17:02:04.644670456 +0000 UTC m=+1628.202997344" Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.675366 4877 generic.go:334] "Generic (PLEG): container finished" podID="0d4944b1-cca6-4ace-9334-12dd2b981be8" containerID="17704eafeb2a43ead19f7f794704f36f4ebad9eb484cf6e3fccdecbbccfc6428" exitCode=1 Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.675458 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-55d5d56894-j6z8h" event={"ID":"0d4944b1-cca6-4ace-9334-12dd2b981be8","Type":"ContainerDied","Data":"17704eafeb2a43ead19f7f794704f36f4ebad9eb484cf6e3fccdecbbccfc6428"} Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.675568 4877 scope.go:117] "RemoveContainer" containerID="cf699f9aa10797ffded4b4e11d2ea90d808ded4cf4d997d253668fb325be3b64" Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.681024 4877 scope.go:117] "RemoveContainer" containerID="17704eafeb2a43ead19f7f794704f36f4ebad9eb484cf6e3fccdecbbccfc6428" Jan 28 17:02:04 crc kubenswrapper[4877]: E0128 17:02:04.683102 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-55d5d56894-j6z8h_openstack(0d4944b1-cca6-4ace-9334-12dd2b981be8)\"" pod="openstack/heat-api-55d5d56894-j6z8h" podUID="0d4944b1-cca6-4ace-9334-12dd2b981be8" Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.697671 4877 generic.go:334] "Generic (PLEG): container finished" podID="4de40718-80f2-48ae-8929-32a2c0e96707" containerID="6060488dbff87400bada19b38a6cdf3f0320f043c26cf76910b39d6cbb32382f" exitCode=1 Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.697713 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" 
event={"ID":"4de40718-80f2-48ae-8929-32a2c0e96707","Type":"ContainerDied","Data":"6060488dbff87400bada19b38a6cdf3f0320f043c26cf76910b39d6cbb32382f"} Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.698512 4877 scope.go:117] "RemoveContainer" containerID="6060488dbff87400bada19b38a6cdf3f0320f043c26cf76910b39d6cbb32382f" Jan 28 17:02:04 crc kubenswrapper[4877]: E0128 17:02:04.698867 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-b5644bdb6-c7hcq_openstack(4de40718-80f2-48ae-8929-32a2c0e96707)\"" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" podUID="4de40718-80f2-48ae-8929-32a2c0e96707" Jan 28 17:02:04 crc kubenswrapper[4877]: I0128 17:02:04.901803 4877 scope.go:117] "RemoveContainer" containerID="aa25a1b0108db58da8462e80478184b510b2f77a94185ea97365c7ae28118e8e" Jan 28 17:02:05 crc kubenswrapper[4877]: I0128 17:02:05.367514 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55414a13-6cc8-42cb-bb48-610740b92289" path="/var/lib/kubelet/pods/55414a13-6cc8-42cb-bb48-610740b92289/volumes" Jan 28 17:02:05 crc kubenswrapper[4877]: I0128 17:02:05.746248 4877 scope.go:117] "RemoveContainer" containerID="6060488dbff87400bada19b38a6cdf3f0320f043c26cf76910b39d6cbb32382f" Jan 28 17:02:05 crc kubenswrapper[4877]: E0128 17:02:05.746666 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-b5644bdb6-c7hcq_openstack(4de40718-80f2-48ae-8929-32a2c0e96707)\"" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" podUID="4de40718-80f2-48ae-8929-32a2c0e96707" Jan 28 17:02:05 crc kubenswrapper[4877]: I0128 17:02:05.793393 4877 generic.go:334] "Generic (PLEG): container finished" podID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerID="dcd49f3dfa8ab48f70a26751cd6ce2d27dc33a551ceedb57b44c8724f4f67998" exitCode=2 Jan 28 17:02:05 crc kubenswrapper[4877]: I0128 17:02:05.793422 4877 generic.go:334] "Generic (PLEG): container finished" podID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerID="e797f444d22b70e52dd0bab83d1d5e39fbf174e7de12276164ab230b18f208ae" exitCode=0 Jan 28 17:02:05 crc kubenswrapper[4877]: I0128 17:02:05.793431 4877 generic.go:334] "Generic (PLEG): container finished" podID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerID="85e30d408e8e56325c43bfb76d65067668f2789838cbcc8a62569b11423b16df" exitCode=0 Jan 28 17:02:05 crc kubenswrapper[4877]: I0128 17:02:05.793473 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb","Type":"ContainerDied","Data":"dcd49f3dfa8ab48f70a26751cd6ce2d27dc33a551ceedb57b44c8724f4f67998"} Jan 28 17:02:05 crc kubenswrapper[4877]: I0128 17:02:05.793526 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb","Type":"ContainerDied","Data":"e797f444d22b70e52dd0bab83d1d5e39fbf174e7de12276164ab230b18f208ae"} Jan 28 17:02:05 crc kubenswrapper[4877]: I0128 17:02:05.793537 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb","Type":"ContainerDied","Data":"85e30d408e8e56325c43bfb76d65067668f2789838cbcc8a62569b11423b16df"} Jan 28 17:02:05 crc kubenswrapper[4877]: I0128 17:02:05.822797 4877 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/glance-default-external-api-0" event={"ID":"368f487a-75ec-44d5-94a2-807b0a152c5d","Type":"ContainerStarted","Data":"00df44c38c579ee5f78f52c8a3a0d84240cb8d47e3bab516339fb9ff9d770e7f"} Jan 28 17:02:05 crc kubenswrapper[4877]: I0128 17:02:05.849624 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"fb414829-821c-4e0e-b099-26253c6a538a","Type":"ContainerStarted","Data":"0112fbbe1244fbee1ac303da3e26801a7c350da4061397b1753282a38ad46ce7"} Jan 28 17:02:05 crc kubenswrapper[4877]: I0128 17:02:05.850043 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 28 17:02:05 crc kubenswrapper[4877]: I0128 17:02:05.883833 4877 scope.go:117] "RemoveContainer" containerID="17704eafeb2a43ead19f7f794704f36f4ebad9eb484cf6e3fccdecbbccfc6428" Jan 28 17:02:05 crc kubenswrapper[4877]: E0128 17:02:05.884121 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-55d5d56894-j6z8h_openstack(0d4944b1-cca6-4ace-9334-12dd2b981be8)\"" pod="openstack/heat-api-55d5d56894-j6z8h" podUID="0d4944b1-cca6-4ace-9334-12dd2b981be8" Jan 28 17:02:05 crc kubenswrapper[4877]: I0128 17:02:05.907808 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.9077866 podStartE2EDuration="5.9077866s" podCreationTimestamp="2026-01-28 17:02:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:02:05.887686529 +0000 UTC m=+1629.446013427" watchObservedRunningTime="2026-01-28 17:02:05.9077866 +0000 UTC m=+1629.466113488" Jan 28 17:02:06 crc kubenswrapper[4877]: I0128 17:02:06.964022 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"368f487a-75ec-44d5-94a2-807b0a152c5d","Type":"ContainerStarted","Data":"0cc7539e406719495d2052511940001a9b8748893c9046169bc454eab894efd4"} Jan 28 17:02:07 crc kubenswrapper[4877]: I0128 17:02:07.037013 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.036986904 podStartE2EDuration="7.036986904s" podCreationTimestamp="2026-01-28 17:02:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:02:07.008062905 +0000 UTC m=+1630.566389813" watchObservedRunningTime="2026-01-28 17:02:07.036986904 +0000 UTC m=+1630.595313792" Jan 28 17:02:07 crc kubenswrapper[4877]: I0128 17:02:07.092855 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:02:07 crc kubenswrapper[4877]: I0128 17:02:07.096155 4877 scope.go:117] "RemoveContainer" containerID="17704eafeb2a43ead19f7f794704f36f4ebad9eb484cf6e3fccdecbbccfc6428" Jan 28 17:02:07 crc kubenswrapper[4877]: E0128 17:02:07.096615 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-55d5d56894-j6z8h_openstack(0d4944b1-cca6-4ace-9334-12dd2b981be8)\"" pod="openstack/heat-api-55d5d56894-j6z8h" podUID="0d4944b1-cca6-4ace-9334-12dd2b981be8" Jan 28 17:02:07 crc kubenswrapper[4877]: I0128 17:02:07.124727 4877 kubelet.go:2542] 
"SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:02:07 crc kubenswrapper[4877]: I0128 17:02:07.126143 4877 scope.go:117] "RemoveContainer" containerID="6060488dbff87400bada19b38a6cdf3f0320f043c26cf76910b39d6cbb32382f" Jan 28 17:02:07 crc kubenswrapper[4877]: E0128 17:02:07.126444 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-b5644bdb6-c7hcq_openstack(4de40718-80f2-48ae-8929-32a2c0e96707)\"" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" podUID="4de40718-80f2-48ae-8929-32a2c0e96707" Jan 28 17:02:07 crc kubenswrapper[4877]: I0128 17:02:07.385244 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 28 17:02:07 crc kubenswrapper[4877]: I0128 17:02:07.385969 4877 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 17:02:07 crc kubenswrapper[4877]: I0128 17:02:07.387411 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 28 17:02:07 crc kubenswrapper[4877]: I0128 17:02:07.939784 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-69c986f6d7-bls2q" podUID="55414a13-6cc8-42cb-bb48-610740b92289" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.202:5353: i/o timeout" Jan 28 17:02:08 crc kubenswrapper[4877]: I0128 17:02:08.371290 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:02:09 crc kubenswrapper[4877]: I0128 17:02:09.303056 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:02:09 crc kubenswrapper[4877]: I0128 17:02:09.474114 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:02:09 crc kubenswrapper[4877]: I0128 17:02:09.553579 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-b5644bdb6-c7hcq"] Jan 28 17:02:09 crc kubenswrapper[4877]: I0128 17:02:09.725306 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:02:09 crc kubenswrapper[4877]: I0128 17:02:09.818469 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-55d5d56894-j6z8h"] Jan 28 17:02:10 crc kubenswrapper[4877]: I0128 17:02:10.907297 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:02:10 crc kubenswrapper[4877]: I0128 17:02:10.919628 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.001318 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2kcl\" (UniqueName: \"kubernetes.io/projected/4de40718-80f2-48ae-8929-32a2c0e96707-kube-api-access-v2kcl\") pod \"4de40718-80f2-48ae-8929-32a2c0e96707\" (UID: \"4de40718-80f2-48ae-8929-32a2c0e96707\") " Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.001373 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-config-data\") pod \"0d4944b1-cca6-4ace-9334-12dd2b981be8\" (UID: \"0d4944b1-cca6-4ace-9334-12dd2b981be8\") " Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.001457 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-config-data-custom\") pod \"0d4944b1-cca6-4ace-9334-12dd2b981be8\" (UID: \"0d4944b1-cca6-4ace-9334-12dd2b981be8\") " Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.001572 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-combined-ca-bundle\") pod \"4de40718-80f2-48ae-8929-32a2c0e96707\" (UID: \"4de40718-80f2-48ae-8929-32a2c0e96707\") " Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.001631 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-combined-ca-bundle\") pod \"0d4944b1-cca6-4ace-9334-12dd2b981be8\" (UID: \"0d4944b1-cca6-4ace-9334-12dd2b981be8\") " Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.001704 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-config-data-custom\") pod \"4de40718-80f2-48ae-8929-32a2c0e96707\" (UID: \"4de40718-80f2-48ae-8929-32a2c0e96707\") " Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.001727 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fdzz\" (UniqueName: \"kubernetes.io/projected/0d4944b1-cca6-4ace-9334-12dd2b981be8-kube-api-access-2fdzz\") pod \"0d4944b1-cca6-4ace-9334-12dd2b981be8\" (UID: \"0d4944b1-cca6-4ace-9334-12dd2b981be8\") " Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.001762 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-config-data\") pod \"4de40718-80f2-48ae-8929-32a2c0e96707\" (UID: \"4de40718-80f2-48ae-8929-32a2c0e96707\") " Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.028938 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-55d5d56894-j6z8h" event={"ID":"0d4944b1-cca6-4ace-9334-12dd2b981be8","Type":"ContainerDied","Data":"31a6fcc2fb4b11bc5c560aa012f970c9270eb10f9b07b199b30081e65988a1e0"} Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.028989 4877 scope.go:117] "RemoveContainer" containerID="17704eafeb2a43ead19f7f794704f36f4ebad9eb484cf6e3fccdecbbccfc6428" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.029058 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-55d5d56894-j6z8h" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.030829 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "4de40718-80f2-48ae-8929-32a2c0e96707" (UID: "4de40718-80f2-48ae-8929-32a2c0e96707"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.031618 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0d4944b1-cca6-4ace-9334-12dd2b981be8" (UID: "0d4944b1-cca6-4ace-9334-12dd2b981be8"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.041038 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" event={"ID":"4de40718-80f2-48ae-8929-32a2c0e96707","Type":"ContainerDied","Data":"5c671452fdabb2e000fbd49eff11431519c79159b5bfcd0d1eeae8b3995ec670"} Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.041155 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-b5644bdb6-c7hcq" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.045310 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4de40718-80f2-48ae-8929-32a2c0e96707-kube-api-access-v2kcl" (OuterVolumeSpecName: "kube-api-access-v2kcl") pod "4de40718-80f2-48ae-8929-32a2c0e96707" (UID: "4de40718-80f2-48ae-8929-32a2c0e96707"). InnerVolumeSpecName "kube-api-access-v2kcl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.061758 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d4944b1-cca6-4ace-9334-12dd2b981be8-kube-api-access-2fdzz" (OuterVolumeSpecName: "kube-api-access-2fdzz") pod "0d4944b1-cca6-4ace-9334-12dd2b981be8" (UID: "0d4944b1-cca6-4ace-9334-12dd2b981be8"). InnerVolumeSpecName "kube-api-access-2fdzz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.084003 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4de40718-80f2-48ae-8929-32a2c0e96707" (UID: "4de40718-80f2-48ae-8929-32a2c0e96707"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.108840 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2kcl\" (UniqueName: \"kubernetes.io/projected/4de40718-80f2-48ae-8929-32a2c0e96707-kube-api-access-v2kcl\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.108877 4877 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.108889 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.108900 4877 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.108912 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fdzz\" (UniqueName: \"kubernetes.io/projected/0d4944b1-cca6-4ace-9334-12dd2b981be8-kube-api-access-2fdzz\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.128178 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0d4944b1-cca6-4ace-9334-12dd2b981be8" (UID: "0d4944b1-cca6-4ace-9334-12dd2b981be8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.178963 4877 scope.go:117] "RemoveContainer" containerID="6060488dbff87400bada19b38a6cdf3f0320f043c26cf76910b39d6cbb32382f" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.179441 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-config-data" (OuterVolumeSpecName: "config-data") pod "4de40718-80f2-48ae-8929-32a2c0e96707" (UID: "4de40718-80f2-48ae-8929-32a2c0e96707"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.211955 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.212468 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4de40718-80f2-48ae-8929-32a2c0e96707-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.291628 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-config-data" (OuterVolumeSpecName: "config-data") pod "0d4944b1-cca6-4ace-9334-12dd2b981be8" (UID: "0d4944b1-cca6-4ace-9334-12dd2b981be8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.315138 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d4944b1-cca6-4ace-9334-12dd2b981be8-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.420631 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.420934 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.427619 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-55d5d56894-j6z8h"] Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.476017 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-55d5d56894-j6z8h"] Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.498648 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.510948 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-b5644bdb6-c7hcq"] Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.540452 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-b5644bdb6-c7hcq"] Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.644248 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.826511 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.912849 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-6c96dc67d-f972b"] Jan 28 17:02:11 crc kubenswrapper[4877]: I0128 17:02:11.913132 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-6c96dc67d-f972b" podUID="97c0facc-4ffb-4f83-86aa-68681d7c3661" containerName="heat-engine" containerID="cri-o://0bd8238c30d3609d31a4549fe38ca17915c0226cede78a1d251f601889371fcd" gracePeriod=60 Jan 28 17:02:12 crc kubenswrapper[4877]: I0128 17:02:12.102240 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 28 17:02:12 crc kubenswrapper[4877]: I0128 17:02:12.103190 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 28 17:02:12 crc kubenswrapper[4877]: I0128 17:02:12.333510 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:02:12 crc kubenswrapper[4877]: E0128 17:02:12.333863 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:02:13 crc kubenswrapper[4877]: I0128 17:02:13.346850 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes 
dir" podUID="0d4944b1-cca6-4ace-9334-12dd2b981be8" path="/var/lib/kubelet/pods/0d4944b1-cca6-4ace-9334-12dd2b981be8/volumes" Jan 28 17:02:13 crc kubenswrapper[4877]: I0128 17:02:13.347943 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4de40718-80f2-48ae-8929-32a2c0e96707" path="/var/lib/kubelet/pods/4de40718-80f2-48ae-8929-32a2c0e96707/volumes" Jan 28 17:02:14 crc kubenswrapper[4877]: I0128 17:02:14.030329 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hhmdh" Jan 28 17:02:14 crc kubenswrapper[4877]: I0128 17:02:14.031720 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hhmdh" Jan 28 17:02:14 crc kubenswrapper[4877]: I0128 17:02:14.139042 4877 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 17:02:14 crc kubenswrapper[4877]: I0128 17:02:14.139072 4877 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 17:02:14 crc kubenswrapper[4877]: I0128 17:02:14.867213 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 28 17:02:14 crc kubenswrapper[4877]: I0128 17:02:14.867664 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 28 17:02:15 crc kubenswrapper[4877]: I0128 17:02:15.116840 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-hhmdh" podUID="daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" containerName="registry-server" probeResult="failure" output=< Jan 28 17:02:15 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:02:15 crc kubenswrapper[4877]: > Jan 28 17:02:15 crc kubenswrapper[4877]: I0128 17:02:15.779763 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="fb414829-821c-4e0e-b099-26253c6a538a" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.229:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 17:02:16 crc kubenswrapper[4877]: I0128 17:02:16.085128 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7574dd45d-jbx2q" Jan 28 17:02:17 crc kubenswrapper[4877]: I0128 17:02:17.155768 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 28 17:02:17 crc kubenswrapper[4877]: I0128 17:02:17.810889 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-api-0" podUID="fb414829-821c-4e0e-b099-26253c6a538a" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.229:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 17:02:18 crc kubenswrapper[4877]: I0128 17:02:18.250838 4877 generic.go:334] "Generic (PLEG): container finished" podID="97c0facc-4ffb-4f83-86aa-68681d7c3661" containerID="0bd8238c30d3609d31a4549fe38ca17915c0226cede78a1d251f601889371fcd" exitCode=0 Jan 28 17:02:18 crc kubenswrapper[4877]: I0128 17:02:18.250892 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6c96dc67d-f972b" 
event={"ID":"97c0facc-4ffb-4f83-86aa-68681d7c3661","Type":"ContainerDied","Data":"0bd8238c30d3609d31a4549fe38ca17915c0226cede78a1d251f601889371fcd"} Jan 28 17:02:19 crc kubenswrapper[4877]: I0128 17:02:19.840862 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 28 17:02:20 crc kubenswrapper[4877]: E0128 17:02:20.039614 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0bd8238c30d3609d31a4549fe38ca17915c0226cede78a1d251f601889371fcd is running failed: container process not found" containerID="0bd8238c30d3609d31a4549fe38ca17915c0226cede78a1d251f601889371fcd" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 28 17:02:20 crc kubenswrapper[4877]: E0128 17:02:20.041517 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0bd8238c30d3609d31a4549fe38ca17915c0226cede78a1d251f601889371fcd is running failed: container process not found" containerID="0bd8238c30d3609d31a4549fe38ca17915c0226cede78a1d251f601889371fcd" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 28 17:02:20 crc kubenswrapper[4877]: E0128 17:02:20.041870 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0bd8238c30d3609d31a4549fe38ca17915c0226cede78a1d251f601889371fcd is running failed: container process not found" containerID="0bd8238c30d3609d31a4549fe38ca17915c0226cede78a1d251f601889371fcd" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 28 17:02:20 crc kubenswrapper[4877]: E0128 17:02:20.041904 4877 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0bd8238c30d3609d31a4549fe38ca17915c0226cede78a1d251f601889371fcd is running failed: container process not found" probeType="Readiness" pod="openstack/heat-engine-6c96dc67d-f972b" podUID="97c0facc-4ffb-4f83-86aa-68681d7c3661" containerName="heat-engine" Jan 28 17:02:21 crc kubenswrapper[4877]: I0128 17:02:21.651087 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6db6c6788c-7mlr2" Jan 28 17:02:21 crc kubenswrapper[4877]: I0128 17:02:21.852518 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7574dd45d-jbx2q"] Jan 28 17:02:21 crc kubenswrapper[4877]: I0128 17:02:21.852751 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7574dd45d-jbx2q" podUID="43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f" containerName="neutron-api" containerID="cri-o://bf410acad92d3276f75950cf794ecd8c3b21031823ac77c2d02323917bc3e376" gracePeriod=30 Jan 28 17:02:21 crc kubenswrapper[4877]: I0128 17:02:21.853411 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7574dd45d-jbx2q" podUID="43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f" containerName="neutron-httpd" containerID="cri-o://cdcf2e3c702d98ab739f7b2b88774bdd56cdcc7c08b392586bf84f18ad9f5fd0" gracePeriod=30 Jan 28 17:02:22 crc kubenswrapper[4877]: I0128 17:02:22.339953 4877 generic.go:334] "Generic (PLEG): container finished" podID="43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f" containerID="cdcf2e3c702d98ab739f7b2b88774bdd56cdcc7c08b392586bf84f18ad9f5fd0" exitCode=0 Jan 28 17:02:22 crc kubenswrapper[4877]: I0128 17:02:22.340016 4877 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/neutron-7574dd45d-jbx2q" event={"ID":"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f","Type":"ContainerDied","Data":"cdcf2e3c702d98ab739f7b2b88774bdd56cdcc7c08b392586bf84f18ad9f5fd0"} Jan 28 17:02:25 crc kubenswrapper[4877]: I0128 17:02:25.100285 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-hhmdh" podUID="daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" containerName="registry-server" probeResult="failure" output=< Jan 28 17:02:25 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:02:25 crc kubenswrapper[4877]: > Jan 28 17:02:25 crc kubenswrapper[4877]: I0128 17:02:25.336584 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:02:25 crc kubenswrapper[4877]: E0128 17:02:25.337037 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:02:26 crc kubenswrapper[4877]: E0128 17:02:26.772077 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified" Jan 28 17:02:26 crc kubenswrapper[4877]: E0128 17:02:26.773746 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:nova-cell0-conductor-db-sync,Image:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CELL_NAME,Value:cell0,ValueFrom:nil,},EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:false,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/kolla/config_files/config.json,SubPath:nova-conductor-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gth75,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42436,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-cell0-conductor-db-sync-j4pm4_openstack(d0456757-a0e3-42a7-900f-422828fe9836): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 17:02:26 crc kubenswrapper[4877]: E0128 17:02:26.778581 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-cell0-conductor-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/nova-cell0-conductor-db-sync-j4pm4" podUID="d0456757-a0e3-42a7-900f-422828fe9836" Jan 28 17:02:27 crc kubenswrapper[4877]: I0128 17:02:27.419695 4877 generic.go:334] "Generic (PLEG): container finished" podID="43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f" containerID="bf410acad92d3276f75950cf794ecd8c3b21031823ac77c2d02323917bc3e376" exitCode=0 Jan 28 17:02:27 crc kubenswrapper[4877]: I0128 17:02:27.419779 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7574dd45d-jbx2q" event={"ID":"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f","Type":"ContainerDied","Data":"bf410acad92d3276f75950cf794ecd8c3b21031823ac77c2d02323917bc3e376"} Jan 28 17:02:27 crc kubenswrapper[4877]: E0128 17:02:27.422460 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-cell0-conductor-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified\\\"\"" pod="openstack/nova-cell0-conductor-db-sync-j4pm4" podUID="d0456757-a0e3-42a7-900f-422828fe9836" Jan 28 17:02:27 crc kubenswrapper[4877]: I0128 17:02:27.875814 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7574dd45d-jbx2q" Jan 28 17:02:27 crc kubenswrapper[4877]: I0128 17:02:27.961260 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6c96dc67d-f972b" Jan 28 17:02:27 crc kubenswrapper[4877]: I0128 17:02:27.988308 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-httpd-config\") pod \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " Jan 28 17:02:27 crc kubenswrapper[4877]: I0128 17:02:27.988417 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wlldb\" (UniqueName: \"kubernetes.io/projected/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-kube-api-access-wlldb\") pod \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " Jan 28 17:02:27 crc kubenswrapper[4877]: I0128 17:02:27.988564 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-ovndb-tls-certs\") pod \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " Jan 28 17:02:27 crc kubenswrapper[4877]: I0128 17:02:27.988592 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-config\") pod \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " Jan 28 17:02:27 crc kubenswrapper[4877]: I0128 17:02:27.988647 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-combined-ca-bundle\") pod \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\" (UID: \"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f\") " Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.009809 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-kube-api-access-wlldb" (OuterVolumeSpecName: "kube-api-access-wlldb") pod "43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f" (UID: "43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f"). InnerVolumeSpecName "kube-api-access-wlldb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.014057 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f" (UID: "43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.090208 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-combined-ca-bundle\") pod \"97c0facc-4ffb-4f83-86aa-68681d7c3661\" (UID: \"97c0facc-4ffb-4f83-86aa-68681d7c3661\") " Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.090364 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-config-data-custom\") pod \"97c0facc-4ffb-4f83-86aa-68681d7c3661\" (UID: \"97c0facc-4ffb-4f83-86aa-68681d7c3661\") " Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.090394 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-config-data\") pod \"97c0facc-4ffb-4f83-86aa-68681d7c3661\" (UID: \"97c0facc-4ffb-4f83-86aa-68681d7c3661\") " Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.090544 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkgqg\" (UniqueName: \"kubernetes.io/projected/97c0facc-4ffb-4f83-86aa-68681d7c3661-kube-api-access-gkgqg\") pod \"97c0facc-4ffb-4f83-86aa-68681d7c3661\" (UID: \"97c0facc-4ffb-4f83-86aa-68681d7c3661\") " Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.091095 4877 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.091122 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wlldb\" (UniqueName: \"kubernetes.io/projected/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-kube-api-access-wlldb\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.094701 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97c0facc-4ffb-4f83-86aa-68681d7c3661-kube-api-access-gkgqg" (OuterVolumeSpecName: "kube-api-access-gkgqg") pod "97c0facc-4ffb-4f83-86aa-68681d7c3661" (UID: "97c0facc-4ffb-4f83-86aa-68681d7c3661"). InnerVolumeSpecName "kube-api-access-gkgqg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.094841 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "97c0facc-4ffb-4f83-86aa-68681d7c3661" (UID: "97c0facc-4ffb-4f83-86aa-68681d7c3661"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.112304 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-config" (OuterVolumeSpecName: "config") pod "43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f" (UID: "43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.116848 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f" (UID: "43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.128567 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f" (UID: "43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.131271 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97c0facc-4ffb-4f83-86aa-68681d7c3661" (UID: "97c0facc-4ffb-4f83-86aa-68681d7c3661"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.167703 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-config-data" (OuterVolumeSpecName: "config-data") pod "97c0facc-4ffb-4f83-86aa-68681d7c3661" (UID: "97c0facc-4ffb-4f83-86aa-68681d7c3661"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.193574 4877 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.193615 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-config\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.193626 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gkgqg\" (UniqueName: \"kubernetes.io/projected/97c0facc-4ffb-4f83-86aa-68681d7c3661-kube-api-access-gkgqg\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.193636 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.193645 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.193655 4877 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.193667 4877 reconciler_common.go:293] "Volume detached for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/97c0facc-4ffb-4f83-86aa-68681d7c3661-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.433208 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6c96dc67d-f972b" event={"ID":"97c0facc-4ffb-4f83-86aa-68681d7c3661","Type":"ContainerDied","Data":"b1b047655a6be1c655a0b1a0802b1e9b08949a769925c8080d76bc0e4d598216"} Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.433283 4877 scope.go:117] "RemoveContainer" containerID="0bd8238c30d3609d31a4549fe38ca17915c0226cede78a1d251f601889371fcd" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.433284 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6c96dc67d-f972b" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.439706 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7574dd45d-jbx2q" event={"ID":"43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f","Type":"ContainerDied","Data":"8e4a99d03e7ab3fd00f0f089be6439d42607d29eebfa3f1e4a4c01d8f01e8415"} Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.439814 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7574dd45d-jbx2q" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.482378 4877 scope.go:117] "RemoveContainer" containerID="cdcf2e3c702d98ab739f7b2b88774bdd56cdcc7c08b392586bf84f18ad9f5fd0" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.497187 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-6c96dc67d-f972b"] Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.513112 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-6c96dc67d-f972b"] Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.519330 4877 scope.go:117] "RemoveContainer" containerID="bf410acad92d3276f75950cf794ecd8c3b21031823ac77c2d02323917bc3e376" Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.530384 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7574dd45d-jbx2q"] Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.546281 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7574dd45d-jbx2q"] Jan 28 17:02:28 crc kubenswrapper[4877]: I0128 17:02:28.914668 4877 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podfc2f618a-c56e-4c06-a365-be3073f2c2ae"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podfc2f618a-c56e-4c06-a365-be3073f2c2ae] : Timed out while waiting for systemd to remove kubepods-besteffort-podfc2f618a_c56e_4c06_a365_be3073f2c2ae.slice" Jan 28 17:02:28 crc kubenswrapper[4877]: E0128 17:02:28.914734 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort podfc2f618a-c56e-4c06-a365-be3073f2c2ae] : unable to destroy cgroup paths for cgroup [kubepods besteffort podfc2f618a-c56e-4c06-a365-be3073f2c2ae] : Timed out while waiting for systemd to remove kubepods-besteffort-podfc2f618a_c56e_4c06_a365_be3073f2c2ae.slice" pod="openstack/dnsmasq-dns-78d5585959-hndnh" podUID="fc2f618a-c56e-4c06-a365-be3073f2c2ae" Jan 28 17:02:29 crc kubenswrapper[4877]: I0128 17:02:29.347221 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f" path="/var/lib/kubelet/pods/43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f/volumes" Jan 28 17:02:29 crc 
kubenswrapper[4877]: I0128 17:02:29.348418 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97c0facc-4ffb-4f83-86aa-68681d7c3661" path="/var/lib/kubelet/pods/97c0facc-4ffb-4f83-86aa-68681d7c3661/volumes" Jan 28 17:02:29 crc kubenswrapper[4877]: I0128 17:02:29.455393 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78d5585959-hndnh" Jan 28 17:02:29 crc kubenswrapper[4877]: I0128 17:02:29.542107 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78d5585959-hndnh"] Jan 28 17:02:29 crc kubenswrapper[4877]: I0128 17:02:29.563914 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78d5585959-hndnh"] Jan 28 17:02:31 crc kubenswrapper[4877]: I0128 17:02:31.343797 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc2f618a-c56e-4c06-a365-be3073f2c2ae" path="/var/lib/kubelet/pods/fc2f618a-c56e-4c06-a365-be3073f2c2ae/volumes" Jan 28 17:02:34 crc kubenswrapper[4877]: I0128 17:02:34.181418 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hhmdh" Jan 28 17:02:34 crc kubenswrapper[4877]: I0128 17:02:34.241864 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hhmdh" Jan 28 17:02:34 crc kubenswrapper[4877]: I0128 17:02:34.841571 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hhmdh"] Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.516342 4877 generic.go:334] "Generic (PLEG): container finished" podID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerID="1fddee63d2990da9c63b575f83c718fea9ee88781d6acd92b9ad88b8616fb1cd" exitCode=137 Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.516413 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb","Type":"ContainerDied","Data":"1fddee63d2990da9c63b575f83c718fea9ee88781d6acd92b9ad88b8616fb1cd"} Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.518068 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hhmdh" podUID="daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" containerName="registry-server" containerID="cri-o://0dadce2063315347cc4d328890567815d297c6be4d1ca8ca2842098fefc111a1" gracePeriod=2 Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.631634 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.674724 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-sg-core-conf-yaml\") pod \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.674832 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-combined-ca-bundle\") pod \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.675062 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-config-data\") pod \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.675149 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-scripts\") pod \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.675183 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mj8w2\" (UniqueName: \"kubernetes.io/projected/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-kube-api-access-mj8w2\") pod \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.692184 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-kube-api-access-mj8w2" (OuterVolumeSpecName: "kube-api-access-mj8w2") pod "d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" (UID: "d03740aa-84c5-46aa-b3f0-ee1bcb3843cb"). InnerVolumeSpecName "kube-api-access-mj8w2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.714867 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-scripts" (OuterVolumeSpecName: "scripts") pod "d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" (UID: "d03740aa-84c5-46aa-b3f0-ee1bcb3843cb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.757703 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" (UID: "d03740aa-84c5-46aa-b3f0-ee1bcb3843cb"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.777177 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-run-httpd\") pod \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.777231 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-log-httpd\") pod \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\" (UID: \"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb\") " Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.777768 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" (UID: "d03740aa-84c5-46aa-b3f0-ee1bcb3843cb"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.777975 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" (UID: "d03740aa-84c5-46aa-b3f0-ee1bcb3843cb"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.779599 4877 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.779625 4877 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.779639 4877 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.779655 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.779667 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mj8w2\" (UniqueName: \"kubernetes.io/projected/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-kube-api-access-mj8w2\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.854634 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" (UID: "d03740aa-84c5-46aa-b3f0-ee1bcb3843cb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.858835 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-config-data" (OuterVolumeSpecName: "config-data") pod "d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" (UID: "d03740aa-84c5-46aa-b3f0-ee1bcb3843cb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.882370 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:35 crc kubenswrapper[4877]: I0128 17:02:35.882419 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.078276 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hhmdh" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.086953 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-utilities\") pod \"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728\" (UID: \"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728\") " Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.087001 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-catalog-content\") pod \"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728\" (UID: \"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728\") " Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.087138 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pfmb\" (UniqueName: \"kubernetes.io/projected/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-kube-api-access-8pfmb\") pod \"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728\" (UID: \"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728\") " Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.088499 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-utilities" (OuterVolumeSpecName: "utilities") pod "daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" (UID: "daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.092901 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-kube-api-access-8pfmb" (OuterVolumeSpecName: "kube-api-access-8pfmb") pod "daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" (UID: "daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728"). InnerVolumeSpecName "kube-api-access-8pfmb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.160580 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" (UID: "daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.189942 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.189975 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.189987 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8pfmb\" (UniqueName: \"kubernetes.io/projected/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728-kube-api-access-8pfmb\") on node \"crc\" DevicePath \"\"" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.534117 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03740aa-84c5-46aa-b3f0-ee1bcb3843cb","Type":"ContainerDied","Data":"83af15d5ecdab9a4e039d16c1810b4ad5dbdcc5cce2d39c03a4821af35b9a097"} Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.534241 4877 scope.go:117] "RemoveContainer" containerID="1fddee63d2990da9c63b575f83c718fea9ee88781d6acd92b9ad88b8616fb1cd" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.534251 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.540936 4877 generic.go:334] "Generic (PLEG): container finished" podID="daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" containerID="0dadce2063315347cc4d328890567815d297c6be4d1ca8ca2842098fefc111a1" exitCode=0 Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.540988 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhmdh" event={"ID":"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728","Type":"ContainerDied","Data":"0dadce2063315347cc4d328890567815d297c6be4d1ca8ca2842098fefc111a1"} Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.541020 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhmdh" event={"ID":"daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728","Type":"ContainerDied","Data":"d248fcbbb3147752bf0c9186a91839847b88f7c55a76496336a5799a51ab8bf2"} Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.541099 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hhmdh" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.579353 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.588567 4877 scope.go:117] "RemoveContainer" containerID="dcd49f3dfa8ab48f70a26751cd6ce2d27dc33a551ceedb57b44c8724f4f67998" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.594231 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.605179 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hhmdh"] Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.616300 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hhmdh"] Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.637680 4877 scope.go:117] "RemoveContainer" containerID="e797f444d22b70e52dd0bab83d1d5e39fbf174e7de12276164ab230b18f208ae" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.665567 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:02:36 crc kubenswrapper[4877]: E0128 17:02:36.666133 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f" containerName="neutron-httpd" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666153 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f" containerName="neutron-httpd" Jan 28 17:02:36 crc kubenswrapper[4877]: E0128 17:02:36.666174 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="sg-core" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666182 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="sg-core" Jan 28 17:02:36 crc kubenswrapper[4877]: E0128 17:02:36.666197 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55414a13-6cc8-42cb-bb48-610740b92289" containerName="init" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666205 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="55414a13-6cc8-42cb-bb48-610740b92289" containerName="init" Jan 28 17:02:36 crc kubenswrapper[4877]: E0128 17:02:36.666215 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="ceilometer-notification-agent" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666223 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="ceilometer-notification-agent" Jan 28 17:02:36 crc kubenswrapper[4877]: E0128 17:02:36.666240 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4944b1-cca6-4ace-9334-12dd2b981be8" containerName="heat-api" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666247 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4944b1-cca6-4ace-9334-12dd2b981be8" containerName="heat-api" Jan 28 17:02:36 crc kubenswrapper[4877]: E0128 17:02:36.666269 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4de40718-80f2-48ae-8929-32a2c0e96707" containerName="heat-cfnapi" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666276 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="4de40718-80f2-48ae-8929-32a2c0e96707" 
containerName="heat-cfnapi" Jan 28 17:02:36 crc kubenswrapper[4877]: E0128 17:02:36.666292 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97c0facc-4ffb-4f83-86aa-68681d7c3661" containerName="heat-engine" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666299 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="97c0facc-4ffb-4f83-86aa-68681d7c3661" containerName="heat-engine" Jan 28 17:02:36 crc kubenswrapper[4877]: E0128 17:02:36.666314 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f" containerName="neutron-api" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666321 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f" containerName="neutron-api" Jan 28 17:02:36 crc kubenswrapper[4877]: E0128 17:02:36.666335 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="ceilometer-central-agent" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666342 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="ceilometer-central-agent" Jan 28 17:02:36 crc kubenswrapper[4877]: E0128 17:02:36.666356 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" containerName="extract-content" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666364 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" containerName="extract-content" Jan 28 17:02:36 crc kubenswrapper[4877]: E0128 17:02:36.666381 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4de40718-80f2-48ae-8929-32a2c0e96707" containerName="heat-cfnapi" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666390 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="4de40718-80f2-48ae-8929-32a2c0e96707" containerName="heat-cfnapi" Jan 28 17:02:36 crc kubenswrapper[4877]: E0128 17:02:36.666409 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="proxy-httpd" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666415 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="proxy-httpd" Jan 28 17:02:36 crc kubenswrapper[4877]: E0128 17:02:36.666432 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" containerName="extract-utilities" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666440 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" containerName="extract-utilities" Jan 28 17:02:36 crc kubenswrapper[4877]: E0128 17:02:36.666455 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55414a13-6cc8-42cb-bb48-610740b92289" containerName="dnsmasq-dns" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666462 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="55414a13-6cc8-42cb-bb48-610740b92289" containerName="dnsmasq-dns" Jan 28 17:02:36 crc kubenswrapper[4877]: E0128 17:02:36.666496 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" containerName="registry-server" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666504 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" 
containerName="registry-server" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666759 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" containerName="registry-server" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666780 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f" containerName="neutron-httpd" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666788 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="ceilometer-notification-agent" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666799 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="proxy-httpd" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666810 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="43e8b6e1-ff10-4daf-9b5d-72f3b39ce99f" containerName="neutron-api" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666823 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="sg-core" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666834 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="97c0facc-4ffb-4f83-86aa-68681d7c3661" containerName="heat-engine" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666851 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4944b1-cca6-4ace-9334-12dd2b981be8" containerName="heat-api" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666865 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="4de40718-80f2-48ae-8929-32a2c0e96707" containerName="heat-cfnapi" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666878 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4944b1-cca6-4ace-9334-12dd2b981be8" containerName="heat-api" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666887 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" containerName="ceilometer-central-agent" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666902 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="4de40718-80f2-48ae-8929-32a2c0e96707" containerName="heat-cfnapi" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.666911 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="55414a13-6cc8-42cb-bb48-610740b92289" containerName="dnsmasq-dns" Jan 28 17:02:36 crc kubenswrapper[4877]: E0128 17:02:36.667164 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4944b1-cca6-4ace-9334-12dd2b981be8" containerName="heat-api" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.667175 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4944b1-cca6-4ace-9334-12dd2b981be8" containerName="heat-api" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.669403 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.671776 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.672083 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.702158 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.707190 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-config-data\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.707363 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sc2pm\" (UniqueName: \"kubernetes.io/projected/3c0b5629-09cf-4084-ba92-2f2c6a09921f-kube-api-access-sc2pm\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.707434 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3c0b5629-09cf-4084-ba92-2f2c6a09921f-log-httpd\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.707510 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.707564 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3c0b5629-09cf-4084-ba92-2f2c6a09921f-run-httpd\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.707804 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-scripts\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.711762 4877 scope.go:117] "RemoveContainer" containerID="85e30d408e8e56325c43bfb76d65067668f2789838cbcc8a62569b11423b16df" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.741933 4877 scope.go:117] "RemoveContainer" containerID="0dadce2063315347cc4d328890567815d297c6be4d1ca8ca2842098fefc111a1" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.810284 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-config-data\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.810353 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sc2pm\" (UniqueName: \"kubernetes.io/projected/3c0b5629-09cf-4084-ba92-2f2c6a09921f-kube-api-access-sc2pm\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.810388 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3c0b5629-09cf-4084-ba92-2f2c6a09921f-log-httpd\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.810413 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.810434 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3c0b5629-09cf-4084-ba92-2f2c6a09921f-run-httpd\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.810610 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-scripts\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.810710 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.810980 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3c0b5629-09cf-4084-ba92-2f2c6a09921f-log-httpd\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.811199 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3c0b5629-09cf-4084-ba92-2f2c6a09921f-run-httpd\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.815706 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.816413 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.817200 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-config-data\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.818018 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-scripts\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.825777 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.853378 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sc2pm\" (UniqueName: \"kubernetes.io/projected/3c0b5629-09cf-4084-ba92-2f2c6a09921f-kube-api-access-sc2pm\") pod \"ceilometer-0\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " pod="openstack/ceilometer-0" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.856136 4877 scope.go:117] "RemoveContainer" containerID="297daf0a98f80b9e82b35c6a4a9e969836d75aee6576bf4d6a376742366797b1" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.956343 4877 scope.go:117] "RemoveContainer" containerID="4953d8b85023ef2a802914c168a43bae53d5578d1d0c189b29f2647874eaa721" Jan 28 17:02:36 crc kubenswrapper[4877]: I0128 17:02:36.997297 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:02:37 crc kubenswrapper[4877]: I0128 17:02:37.022660 4877 scope.go:117] "RemoveContainer" containerID="0dadce2063315347cc4d328890567815d297c6be4d1ca8ca2842098fefc111a1" Jan 28 17:02:37 crc kubenswrapper[4877]: E0128 17:02:37.023076 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0dadce2063315347cc4d328890567815d297c6be4d1ca8ca2842098fefc111a1\": container with ID starting with 0dadce2063315347cc4d328890567815d297c6be4d1ca8ca2842098fefc111a1 not found: ID does not exist" containerID="0dadce2063315347cc4d328890567815d297c6be4d1ca8ca2842098fefc111a1" Jan 28 17:02:37 crc kubenswrapper[4877]: I0128 17:02:37.023108 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0dadce2063315347cc4d328890567815d297c6be4d1ca8ca2842098fefc111a1"} err="failed to get container status \"0dadce2063315347cc4d328890567815d297c6be4d1ca8ca2842098fefc111a1\": rpc error: code = NotFound desc = could not find container \"0dadce2063315347cc4d328890567815d297c6be4d1ca8ca2842098fefc111a1\": container with ID starting with 0dadce2063315347cc4d328890567815d297c6be4d1ca8ca2842098fefc111a1 not found: ID does not exist" Jan 28 17:02:37 crc kubenswrapper[4877]: I0128 17:02:37.023131 4877 scope.go:117] "RemoveContainer" containerID="297daf0a98f80b9e82b35c6a4a9e969836d75aee6576bf4d6a376742366797b1" Jan 28 17:02:37 crc kubenswrapper[4877]: E0128 17:02:37.023746 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"297daf0a98f80b9e82b35c6a4a9e969836d75aee6576bf4d6a376742366797b1\": container with ID starting with 297daf0a98f80b9e82b35c6a4a9e969836d75aee6576bf4d6a376742366797b1 not found: ID does not exist" containerID="297daf0a98f80b9e82b35c6a4a9e969836d75aee6576bf4d6a376742366797b1" Jan 28 17:02:37 crc kubenswrapper[4877]: I0128 17:02:37.023767 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"297daf0a98f80b9e82b35c6a4a9e969836d75aee6576bf4d6a376742366797b1"} err="failed to get container status \"297daf0a98f80b9e82b35c6a4a9e969836d75aee6576bf4d6a376742366797b1\": rpc error: code = NotFound desc = could not find container \"297daf0a98f80b9e82b35c6a4a9e969836d75aee6576bf4d6a376742366797b1\": container with ID starting with 297daf0a98f80b9e82b35c6a4a9e969836d75aee6576bf4d6a376742366797b1 not found: ID does not exist" Jan 28 17:02:37 crc kubenswrapper[4877]: I0128 17:02:37.023780 4877 scope.go:117] "RemoveContainer" containerID="4953d8b85023ef2a802914c168a43bae53d5578d1d0c189b29f2647874eaa721" Jan 28 17:02:37 crc kubenswrapper[4877]: E0128 17:02:37.023993 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4953d8b85023ef2a802914c168a43bae53d5578d1d0c189b29f2647874eaa721\": container with ID starting with 4953d8b85023ef2a802914c168a43bae53d5578d1d0c189b29f2647874eaa721 not found: ID does not exist" containerID="4953d8b85023ef2a802914c168a43bae53d5578d1d0c189b29f2647874eaa721" Jan 28 17:02:37 crc kubenswrapper[4877]: I0128 17:02:37.024014 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4953d8b85023ef2a802914c168a43bae53d5578d1d0c189b29f2647874eaa721"} err="failed to get container status \"4953d8b85023ef2a802914c168a43bae53d5578d1d0c189b29f2647874eaa721\": rpc error: code = NotFound desc = could not 
find container \"4953d8b85023ef2a802914c168a43bae53d5578d1d0c189b29f2647874eaa721\": container with ID starting with 4953d8b85023ef2a802914c168a43bae53d5578d1d0c189b29f2647874eaa721 not found: ID does not exist" Jan 28 17:02:37 crc kubenswrapper[4877]: I0128 17:02:37.347639 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d03740aa-84c5-46aa-b3f0-ee1bcb3843cb" path="/var/lib/kubelet/pods/d03740aa-84c5-46aa-b3f0-ee1bcb3843cb/volumes" Jan 28 17:02:37 crc kubenswrapper[4877]: I0128 17:02:37.349504 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728" path="/var/lib/kubelet/pods/daa3b3c0-3574-4ee2-a1ed-6ebe4ab96728/volumes" Jan 28 17:02:37 crc kubenswrapper[4877]: I0128 17:02:37.621256 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:02:37 crc kubenswrapper[4877]: W0128 17:02:37.628657 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c0b5629_09cf_4084_ba92_2f2c6a09921f.slice/crio-3ee42c151eb70dab4868c2dc0e6e95d90359ece88a61c79fcaec14285c441324 WatchSource:0}: Error finding container 3ee42c151eb70dab4868c2dc0e6e95d90359ece88a61c79fcaec14285c441324: Status 404 returned error can't find the container with id 3ee42c151eb70dab4868c2dc0e6e95d90359ece88a61c79fcaec14285c441324 Jan 28 17:02:37 crc kubenswrapper[4877]: I0128 17:02:37.632974 4877 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 17:02:38 crc kubenswrapper[4877]: I0128 17:02:38.574354 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3c0b5629-09cf-4084-ba92-2f2c6a09921f","Type":"ContainerStarted","Data":"3ee42c151eb70dab4868c2dc0e6e95d90359ece88a61c79fcaec14285c441324"} Jan 28 17:02:38 crc kubenswrapper[4877]: I0128 17:02:38.658710 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:02:39 crc kubenswrapper[4877]: I0128 17:02:39.586328 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3c0b5629-09cf-4084-ba92-2f2c6a09921f","Type":"ContainerStarted","Data":"1e8ddf2b35adc322d509b626eb851a7fd8a4601ef19c1e40942a613d49a83c4e"} Jan 28 17:02:40 crc kubenswrapper[4877]: I0128 17:02:40.331390 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:02:40 crc kubenswrapper[4877]: E0128 17:02:40.331831 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:02:40 crc kubenswrapper[4877]: I0128 17:02:40.599656 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3c0b5629-09cf-4084-ba92-2f2c6a09921f","Type":"ContainerStarted","Data":"1a59aec1e4b9d26bd5aee2d799f2b56e098a26076fdfa7a2de596c7ebe98501b"} Jan 28 17:02:40 crc kubenswrapper[4877]: I0128 17:02:40.601470 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-j4pm4" 
event={"ID":"d0456757-a0e3-42a7-900f-422828fe9836","Type":"ContainerStarted","Data":"280d1e05976bf316ebf7fa080e91b03cd06512ba323bcc8e9eb0e6c4179b8f80"} Jan 28 17:02:40 crc kubenswrapper[4877]: I0128 17:02:40.620188 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-j4pm4" podStartSLOduration=2.776224342 podStartE2EDuration="38.62017255s" podCreationTimestamp="2026-01-28 17:02:02 +0000 UTC" firstStartedPulling="2026-01-28 17:02:04.370797834 +0000 UTC m=+1627.929124722" lastFinishedPulling="2026-01-28 17:02:40.214746042 +0000 UTC m=+1663.773072930" observedRunningTime="2026-01-28 17:02:40.617546639 +0000 UTC m=+1664.175873527" watchObservedRunningTime="2026-01-28 17:02:40.62017255 +0000 UTC m=+1664.178499428" Jan 28 17:02:41 crc kubenswrapper[4877]: I0128 17:02:41.615655 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3c0b5629-09cf-4084-ba92-2f2c6a09921f","Type":"ContainerStarted","Data":"9a1e999bfd48c7a5ccb67a01f7dd434077c05ce67b67e067d8b5cf1abdcb5535"} Jan 28 17:02:43 crc kubenswrapper[4877]: I0128 17:02:43.640246 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3c0b5629-09cf-4084-ba92-2f2c6a09921f","Type":"ContainerStarted","Data":"fab1f412dd4a7b09686c06672b52f2bfd683df0f5924c0a3986c3652d51bf948"} Jan 28 17:02:43 crc kubenswrapper[4877]: I0128 17:02:43.640465 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerName="ceilometer-central-agent" containerID="cri-o://1e8ddf2b35adc322d509b626eb851a7fd8a4601ef19c1e40942a613d49a83c4e" gracePeriod=30 Jan 28 17:02:43 crc kubenswrapper[4877]: I0128 17:02:43.640519 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerName="sg-core" containerID="cri-o://9a1e999bfd48c7a5ccb67a01f7dd434077c05ce67b67e067d8b5cf1abdcb5535" gracePeriod=30 Jan 28 17:02:43 crc kubenswrapper[4877]: I0128 17:02:43.640580 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerName="ceilometer-notification-agent" containerID="cri-o://1a59aec1e4b9d26bd5aee2d799f2b56e098a26076fdfa7a2de596c7ebe98501b" gracePeriod=30 Jan 28 17:02:43 crc kubenswrapper[4877]: I0128 17:02:43.640571 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerName="proxy-httpd" containerID="cri-o://fab1f412dd4a7b09686c06672b52f2bfd683df0f5924c0a3986c3652d51bf948" gracePeriod=30 Jan 28 17:02:43 crc kubenswrapper[4877]: I0128 17:02:43.641041 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 17:02:43 crc kubenswrapper[4877]: I0128 17:02:43.695748 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.195222311 podStartE2EDuration="7.695726313s" podCreationTimestamp="2026-01-28 17:02:36 +0000 UTC" firstStartedPulling="2026-01-28 17:02:37.632752241 +0000 UTC m=+1661.191079129" lastFinishedPulling="2026-01-28 17:02:43.133256243 +0000 UTC m=+1666.691583131" observedRunningTime="2026-01-28 17:02:43.693138373 +0000 UTC m=+1667.251465261" watchObservedRunningTime="2026-01-28 17:02:43.695726313 +0000 UTC m=+1667.254053201" Jan 
28 17:02:44 crc kubenswrapper[4877]: I0128 17:02:44.675079 4877 generic.go:334] "Generic (PLEG): container finished" podID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerID="9a1e999bfd48c7a5ccb67a01f7dd434077c05ce67b67e067d8b5cf1abdcb5535" exitCode=2 Jan 28 17:02:44 crc kubenswrapper[4877]: I0128 17:02:44.676135 4877 generic.go:334] "Generic (PLEG): container finished" podID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerID="1a59aec1e4b9d26bd5aee2d799f2b56e098a26076fdfa7a2de596c7ebe98501b" exitCode=0 Jan 28 17:02:44 crc kubenswrapper[4877]: I0128 17:02:44.675262 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3c0b5629-09cf-4084-ba92-2f2c6a09921f","Type":"ContainerDied","Data":"9a1e999bfd48c7a5ccb67a01f7dd434077c05ce67b67e067d8b5cf1abdcb5535"} Jan 28 17:02:44 crc kubenswrapper[4877]: I0128 17:02:44.676678 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3c0b5629-09cf-4084-ba92-2f2c6a09921f","Type":"ContainerDied","Data":"1a59aec1e4b9d26bd5aee2d799f2b56e098a26076fdfa7a2de596c7ebe98501b"} Jan 28 17:02:49 crc kubenswrapper[4877]: I0128 17:02:49.744110 4877 generic.go:334] "Generic (PLEG): container finished" podID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerID="1e8ddf2b35adc322d509b626eb851a7fd8a4601ef19c1e40942a613d49a83c4e" exitCode=0 Jan 28 17:02:49 crc kubenswrapper[4877]: I0128 17:02:49.744212 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3c0b5629-09cf-4084-ba92-2f2c6a09921f","Type":"ContainerDied","Data":"1e8ddf2b35adc322d509b626eb851a7fd8a4601ef19c1e40942a613d49a83c4e"} Jan 28 17:02:54 crc kubenswrapper[4877]: I0128 17:02:54.330986 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:02:54 crc kubenswrapper[4877]: E0128 17:02:54.331849 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.218166 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-j2jrv"] Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.219988 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-j2jrv" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.229871 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-j2jrv"] Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.371584 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-1655-account-create-update-vkpnq"] Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.373695 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-1655-account-create-update-vkpnq" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.375913 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.398828 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-1655-account-create-update-vkpnq"] Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.404270 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e828bbc2-14a9-4ffd-8867-065b294666f4-operator-scripts\") pod \"aodh-db-create-j2jrv\" (UID: \"e828bbc2-14a9-4ffd-8867-065b294666f4\") " pod="openstack/aodh-db-create-j2jrv" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.404511 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cm2q\" (UniqueName: \"kubernetes.io/projected/e828bbc2-14a9-4ffd-8867-065b294666f4-kube-api-access-2cm2q\") pod \"aodh-db-create-j2jrv\" (UID: \"e828bbc2-14a9-4ffd-8867-065b294666f4\") " pod="openstack/aodh-db-create-j2jrv" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.506904 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e828bbc2-14a9-4ffd-8867-065b294666f4-operator-scripts\") pod \"aodh-db-create-j2jrv\" (UID: \"e828bbc2-14a9-4ffd-8867-065b294666f4\") " pod="openstack/aodh-db-create-j2jrv" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.507093 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crdvs\" (UniqueName: \"kubernetes.io/projected/2be3e552-8300-424e-90d5-93a278509c6b-kube-api-access-crdvs\") pod \"aodh-1655-account-create-update-vkpnq\" (UID: \"2be3e552-8300-424e-90d5-93a278509c6b\") " pod="openstack/aodh-1655-account-create-update-vkpnq" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.507172 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cm2q\" (UniqueName: \"kubernetes.io/projected/e828bbc2-14a9-4ffd-8867-065b294666f4-kube-api-access-2cm2q\") pod \"aodh-db-create-j2jrv\" (UID: \"e828bbc2-14a9-4ffd-8867-065b294666f4\") " pod="openstack/aodh-db-create-j2jrv" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.507217 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2be3e552-8300-424e-90d5-93a278509c6b-operator-scripts\") pod \"aodh-1655-account-create-update-vkpnq\" (UID: \"2be3e552-8300-424e-90d5-93a278509c6b\") " pod="openstack/aodh-1655-account-create-update-vkpnq" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.508918 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e828bbc2-14a9-4ffd-8867-065b294666f4-operator-scripts\") pod \"aodh-db-create-j2jrv\" (UID: \"e828bbc2-14a9-4ffd-8867-065b294666f4\") " pod="openstack/aodh-db-create-j2jrv" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.530580 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cm2q\" (UniqueName: \"kubernetes.io/projected/e828bbc2-14a9-4ffd-8867-065b294666f4-kube-api-access-2cm2q\") pod \"aodh-db-create-j2jrv\" (UID: 
\"e828bbc2-14a9-4ffd-8867-065b294666f4\") " pod="openstack/aodh-db-create-j2jrv" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.539632 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-j2jrv" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.609907 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crdvs\" (UniqueName: \"kubernetes.io/projected/2be3e552-8300-424e-90d5-93a278509c6b-kube-api-access-crdvs\") pod \"aodh-1655-account-create-update-vkpnq\" (UID: \"2be3e552-8300-424e-90d5-93a278509c6b\") " pod="openstack/aodh-1655-account-create-update-vkpnq" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.610017 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2be3e552-8300-424e-90d5-93a278509c6b-operator-scripts\") pod \"aodh-1655-account-create-update-vkpnq\" (UID: \"2be3e552-8300-424e-90d5-93a278509c6b\") " pod="openstack/aodh-1655-account-create-update-vkpnq" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.611061 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2be3e552-8300-424e-90d5-93a278509c6b-operator-scripts\") pod \"aodh-1655-account-create-update-vkpnq\" (UID: \"2be3e552-8300-424e-90d5-93a278509c6b\") " pod="openstack/aodh-1655-account-create-update-vkpnq" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.630045 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crdvs\" (UniqueName: \"kubernetes.io/projected/2be3e552-8300-424e-90d5-93a278509c6b-kube-api-access-crdvs\") pod \"aodh-1655-account-create-update-vkpnq\" (UID: \"2be3e552-8300-424e-90d5-93a278509c6b\") " pod="openstack/aodh-1655-account-create-update-vkpnq" Jan 28 17:02:56 crc kubenswrapper[4877]: I0128 17:02:56.704761 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-1655-account-create-update-vkpnq" Jan 28 17:02:57 crc kubenswrapper[4877]: I0128 17:02:57.191744 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-j2jrv"] Jan 28 17:02:57 crc kubenswrapper[4877]: I0128 17:02:57.315064 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-1655-account-create-update-vkpnq"] Jan 28 17:02:57 crc kubenswrapper[4877]: I0128 17:02:57.334846 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Jan 28 17:02:57 crc kubenswrapper[4877]: I0128 17:02:57.864248 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-1655-account-create-update-vkpnq" event={"ID":"2be3e552-8300-424e-90d5-93a278509c6b","Type":"ContainerStarted","Data":"2b676a191a14c327deca49069f39db8a9f67aa7ccdd4c2f4f18e7ccf7daba4ce"} Jan 28 17:02:57 crc kubenswrapper[4877]: I0128 17:02:57.864717 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-1655-account-create-update-vkpnq" event={"ID":"2be3e552-8300-424e-90d5-93a278509c6b","Type":"ContainerStarted","Data":"8522bc62116f77e7bc8d9ade497fa61a94dcb3080f96fd6991be21563e27d764"} Jan 28 17:02:57 crc kubenswrapper[4877]: I0128 17:02:57.866190 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-j2jrv" event={"ID":"e828bbc2-14a9-4ffd-8867-065b294666f4","Type":"ContainerStarted","Data":"6cd98f618fe3677e799bab882f16f43fe0eec5ae61f33694d156a073118aca58"} Jan 28 17:02:57 crc kubenswrapper[4877]: I0128 17:02:57.866222 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-j2jrv" event={"ID":"e828bbc2-14a9-4ffd-8867-065b294666f4","Type":"ContainerStarted","Data":"e2f0b0f73d787cf131e9f50397eb44eae588462fed100f92e4deff34e544b8fe"} Jan 28 17:02:58 crc kubenswrapper[4877]: I0128 17:02:58.896154 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-create-j2jrv" podStartSLOduration=2.896131929 podStartE2EDuration="2.896131929s" podCreationTimestamp="2026-01-28 17:02:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:02:58.89020002 +0000 UTC m=+1682.448526898" watchObservedRunningTime="2026-01-28 17:02:58.896131929 +0000 UTC m=+1682.454458827" Jan 28 17:02:58 crc kubenswrapper[4877]: I0128 17:02:58.915594 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-1655-account-create-update-vkpnq" podStartSLOduration=2.9155731830000002 podStartE2EDuration="2.915573183s" podCreationTimestamp="2026-01-28 17:02:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:02:58.904024142 +0000 UTC m=+1682.462351030" watchObservedRunningTime="2026-01-28 17:02:58.915573183 +0000 UTC m=+1682.473900081" Jan 28 17:02:59 crc kubenswrapper[4877]: I0128 17:02:59.888084 4877 generic.go:334] "Generic (PLEG): container finished" podID="e828bbc2-14a9-4ffd-8867-065b294666f4" containerID="6cd98f618fe3677e799bab882f16f43fe0eec5ae61f33694d156a073118aca58" exitCode=0 Jan 28 17:02:59 crc kubenswrapper[4877]: I0128 17:02:59.888154 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-j2jrv" event={"ID":"e828bbc2-14a9-4ffd-8867-065b294666f4","Type":"ContainerDied","Data":"6cd98f618fe3677e799bab882f16f43fe0eec5ae61f33694d156a073118aca58"} Jan 28 17:03:00 
crc kubenswrapper[4877]: I0128 17:03:00.348176 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" podUID="8355c6a7-af56-4b68-bd65-560a99273480" containerName="heat-cfnapi" probeResult="failure" output="Get \"http://10.217.0.209:8000/healthcheck\": dial tcp 10.217.0.209:8000: connect: connection refused" Jan 28 17:03:00 crc kubenswrapper[4877]: I0128 17:03:00.361714 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-66b66545b5-ldnrl" podUID="db1f5546-0eed-4bf4-bd25-065718c91a46" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.210:8004/healthcheck\": dial tcp 10.217.0.210:8004: connect: connection refused" Jan 28 17:03:00 crc kubenswrapper[4877]: I0128 17:03:00.904416 4877 generic.go:334] "Generic (PLEG): container finished" podID="db1f5546-0eed-4bf4-bd25-065718c91a46" containerID="9221b44fad16ae9be88c646cb760d9c38358bb8592f689a92da95e8627118c98" exitCode=137 Jan 28 17:03:00 crc kubenswrapper[4877]: I0128 17:03:00.904549 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-66b66545b5-ldnrl" event={"ID":"db1f5546-0eed-4bf4-bd25-065718c91a46","Type":"ContainerDied","Data":"9221b44fad16ae9be88c646cb760d9c38358bb8592f689a92da95e8627118c98"} Jan 28 17:03:00 crc kubenswrapper[4877]: I0128 17:03:00.907641 4877 generic.go:334] "Generic (PLEG): container finished" podID="8355c6a7-af56-4b68-bd65-560a99273480" containerID="27a6b7d4000d41610f5d798276ad6c12b77825333cc2be8a645d4374f57c34d8" exitCode=137 Jan 28 17:03:00 crc kubenswrapper[4877]: I0128 17:03:00.907736 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" event={"ID":"8355c6a7-af56-4b68-bd65-560a99273480","Type":"ContainerDied","Data":"27a6b7d4000d41610f5d798276ad6c12b77825333cc2be8a645d4374f57c34d8"} Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.425010 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.453761 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-config-data\") pod \"db1f5546-0eed-4bf4-bd25-065718c91a46\" (UID: \"db1f5546-0eed-4bf4-bd25-065718c91a46\") " Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.453847 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5qjhk\" (UniqueName: \"kubernetes.io/projected/db1f5546-0eed-4bf4-bd25-065718c91a46-kube-api-access-5qjhk\") pod \"db1f5546-0eed-4bf4-bd25-065718c91a46\" (UID: \"db1f5546-0eed-4bf4-bd25-065718c91a46\") " Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.454000 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-config-data-custom\") pod \"db1f5546-0eed-4bf4-bd25-065718c91a46\" (UID: \"db1f5546-0eed-4bf4-bd25-065718c91a46\") " Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.454172 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-combined-ca-bundle\") pod \"db1f5546-0eed-4bf4-bd25-065718c91a46\" (UID: \"db1f5546-0eed-4bf4-bd25-065718c91a46\") " Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.495989 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db1f5546-0eed-4bf4-bd25-065718c91a46-kube-api-access-5qjhk" (OuterVolumeSpecName: "kube-api-access-5qjhk") pod "db1f5546-0eed-4bf4-bd25-065718c91a46" (UID: "db1f5546-0eed-4bf4-bd25-065718c91a46"). InnerVolumeSpecName "kube-api-access-5qjhk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.511072 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "db1f5546-0eed-4bf4-bd25-065718c91a46" (UID: "db1f5546-0eed-4bf4-bd25-065718c91a46"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.535178 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "db1f5546-0eed-4bf4-bd25-065718c91a46" (UID: "db1f5546-0eed-4bf4-bd25-065718c91a46"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.557662 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5qjhk\" (UniqueName: \"kubernetes.io/projected/db1f5546-0eed-4bf4-bd25-065718c91a46-kube-api-access-5qjhk\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.557705 4877 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.557718 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.593767 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-config-data" (OuterVolumeSpecName: "config-data") pod "db1f5546-0eed-4bf4-bd25-065718c91a46" (UID: "db1f5546-0eed-4bf4-bd25-065718c91a46"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.660303 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db1f5546-0eed-4bf4-bd25-065718c91a46-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.689282 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-j2jrv" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.698794 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.761579 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-config-data\") pod \"8355c6a7-af56-4b68-bd65-560a99273480\" (UID: \"8355c6a7-af56-4b68-bd65-560a99273480\") " Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.761644 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-combined-ca-bundle\") pod \"8355c6a7-af56-4b68-bd65-560a99273480\" (UID: \"8355c6a7-af56-4b68-bd65-560a99273480\") " Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.761806 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dfxzv\" (UniqueName: \"kubernetes.io/projected/8355c6a7-af56-4b68-bd65-560a99273480-kube-api-access-dfxzv\") pod \"8355c6a7-af56-4b68-bd65-560a99273480\" (UID: \"8355c6a7-af56-4b68-bd65-560a99273480\") " Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.761897 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e828bbc2-14a9-4ffd-8867-065b294666f4-operator-scripts\") pod \"e828bbc2-14a9-4ffd-8867-065b294666f4\" (UID: \"e828bbc2-14a9-4ffd-8867-065b294666f4\") " Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.762052 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-config-data-custom\") pod \"8355c6a7-af56-4b68-bd65-560a99273480\" (UID: \"8355c6a7-af56-4b68-bd65-560a99273480\") " Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.762101 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2cm2q\" (UniqueName: \"kubernetes.io/projected/e828bbc2-14a9-4ffd-8867-065b294666f4-kube-api-access-2cm2q\") pod \"e828bbc2-14a9-4ffd-8867-065b294666f4\" (UID: \"e828bbc2-14a9-4ffd-8867-065b294666f4\") " Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.762773 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e828bbc2-14a9-4ffd-8867-065b294666f4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e828bbc2-14a9-4ffd-8867-065b294666f4" (UID: "e828bbc2-14a9-4ffd-8867-065b294666f4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.762979 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e828bbc2-14a9-4ffd-8867-065b294666f4-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.765698 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8355c6a7-af56-4b68-bd65-560a99273480-kube-api-access-dfxzv" (OuterVolumeSpecName: "kube-api-access-dfxzv") pod "8355c6a7-af56-4b68-bd65-560a99273480" (UID: "8355c6a7-af56-4b68-bd65-560a99273480"). InnerVolumeSpecName "kube-api-access-dfxzv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.766114 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e828bbc2-14a9-4ffd-8867-065b294666f4-kube-api-access-2cm2q" (OuterVolumeSpecName: "kube-api-access-2cm2q") pod "e828bbc2-14a9-4ffd-8867-065b294666f4" (UID: "e828bbc2-14a9-4ffd-8867-065b294666f4"). InnerVolumeSpecName "kube-api-access-2cm2q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.791969 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8355c6a7-af56-4b68-bd65-560a99273480" (UID: "8355c6a7-af56-4b68-bd65-560a99273480"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.806946 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8355c6a7-af56-4b68-bd65-560a99273480" (UID: "8355c6a7-af56-4b68-bd65-560a99273480"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.833632 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-config-data" (OuterVolumeSpecName: "config-data") pod "8355c6a7-af56-4b68-bd65-560a99273480" (UID: "8355c6a7-af56-4b68-bd65-560a99273480"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.867577 4877 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.867627 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2cm2q\" (UniqueName: \"kubernetes.io/projected/e828bbc2-14a9-4ffd-8867-065b294666f4-kube-api-access-2cm2q\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.867658 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.867670 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8355c6a7-af56-4b68-bd65-560a99273480-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.867681 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dfxzv\" (UniqueName: \"kubernetes.io/projected/8355c6a7-af56-4b68-bd65-560a99273480-kube-api-access-dfxzv\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.928920 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-66b66545b5-ldnrl" event={"ID":"db1f5546-0eed-4bf4-bd25-065718c91a46","Type":"ContainerDied","Data":"dcde4862f2dfde1457c90c189c7a6f2913426d4c6f866e4a8e2a8d2c3192910b"} Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.928972 4877 scope.go:117] "RemoveContainer" containerID="9221b44fad16ae9be88c646cb760d9c38358bb8592f689a92da95e8627118c98" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.929110 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-66b66545b5-ldnrl" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.934722 4877 generic.go:334] "Generic (PLEG): container finished" podID="2be3e552-8300-424e-90d5-93a278509c6b" containerID="2b676a191a14c327deca49069f39db8a9f67aa7ccdd4c2f4f18e7ccf7daba4ce" exitCode=0 Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.934840 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-1655-account-create-update-vkpnq" event={"ID":"2be3e552-8300-424e-90d5-93a278509c6b","Type":"ContainerDied","Data":"2b676a191a14c327deca49069f39db8a9f67aa7ccdd4c2f4f18e7ccf7daba4ce"} Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.937882 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" event={"ID":"8355c6a7-af56-4b68-bd65-560a99273480","Type":"ContainerDied","Data":"888f1b55fc6b7fd67c0a4b1f09a42575075e30cb1a53082038275a86d8587c48"} Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.937971 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7486d7b6df-vf9q9" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.941315 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-j2jrv" event={"ID":"e828bbc2-14a9-4ffd-8867-065b294666f4","Type":"ContainerDied","Data":"e2f0b0f73d787cf131e9f50397eb44eae588462fed100f92e4deff34e544b8fe"} Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.941357 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e2f0b0f73d787cf131e9f50397eb44eae588462fed100f92e4deff34e544b8fe" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.941441 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-j2jrv" Jan 28 17:03:01 crc kubenswrapper[4877]: I0128 17:03:01.969396 4877 scope.go:117] "RemoveContainer" containerID="27a6b7d4000d41610f5d798276ad6c12b77825333cc2be8a645d4374f57c34d8" Jan 28 17:03:02 crc kubenswrapper[4877]: I0128 17:03:02.006872 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7486d7b6df-vf9q9"] Jan 28 17:03:02 crc kubenswrapper[4877]: I0128 17:03:02.019549 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-7486d7b6df-vf9q9"] Jan 28 17:03:02 crc kubenswrapper[4877]: I0128 17:03:02.030433 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-66b66545b5-ldnrl"] Jan 28 17:03:02 crc kubenswrapper[4877]: I0128 17:03:02.043583 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-66b66545b5-ldnrl"] Jan 28 17:03:03 crc kubenswrapper[4877]: I0128 17:03:03.343312 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8355c6a7-af56-4b68-bd65-560a99273480" path="/var/lib/kubelet/pods/8355c6a7-af56-4b68-bd65-560a99273480/volumes" Jan 28 17:03:03 crc kubenswrapper[4877]: I0128 17:03:03.344404 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db1f5546-0eed-4bf4-bd25-065718c91a46" path="/var/lib/kubelet/pods/db1f5546-0eed-4bf4-bd25-065718c91a46/volumes" Jan 28 17:03:03 crc kubenswrapper[4877]: I0128 17:03:03.419154 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-1655-account-create-update-vkpnq" Jan 28 17:03:03 crc kubenswrapper[4877]: I0128 17:03:03.506431 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-crdvs\" (UniqueName: \"kubernetes.io/projected/2be3e552-8300-424e-90d5-93a278509c6b-kube-api-access-crdvs\") pod \"2be3e552-8300-424e-90d5-93a278509c6b\" (UID: \"2be3e552-8300-424e-90d5-93a278509c6b\") " Jan 28 17:03:03 crc kubenswrapper[4877]: I0128 17:03:03.506544 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2be3e552-8300-424e-90d5-93a278509c6b-operator-scripts\") pod \"2be3e552-8300-424e-90d5-93a278509c6b\" (UID: \"2be3e552-8300-424e-90d5-93a278509c6b\") " Jan 28 17:03:03 crc kubenswrapper[4877]: I0128 17:03:03.507044 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2be3e552-8300-424e-90d5-93a278509c6b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2be3e552-8300-424e-90d5-93a278509c6b" (UID: "2be3e552-8300-424e-90d5-93a278509c6b"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:03:03 crc kubenswrapper[4877]: I0128 17:03:03.507939 4877 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2be3e552-8300-424e-90d5-93a278509c6b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:03 crc kubenswrapper[4877]: I0128 17:03:03.514674 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2be3e552-8300-424e-90d5-93a278509c6b-kube-api-access-crdvs" (OuterVolumeSpecName: "kube-api-access-crdvs") pod "2be3e552-8300-424e-90d5-93a278509c6b" (UID: "2be3e552-8300-424e-90d5-93a278509c6b"). InnerVolumeSpecName "kube-api-access-crdvs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:03:03 crc kubenswrapper[4877]: I0128 17:03:03.609929 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-crdvs\" (UniqueName: \"kubernetes.io/projected/2be3e552-8300-424e-90d5-93a278509c6b-kube-api-access-crdvs\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:03 crc kubenswrapper[4877]: I0128 17:03:03.967382 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-1655-account-create-update-vkpnq" event={"ID":"2be3e552-8300-424e-90d5-93a278509c6b","Type":"ContainerDied","Data":"8522bc62116f77e7bc8d9ade497fa61a94dcb3080f96fd6991be21563e27d764"} Jan 28 17:03:03 crc kubenswrapper[4877]: I0128 17:03:03.967430 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8522bc62116f77e7bc8d9ade497fa61a94dcb3080f96fd6991be21563e27d764" Jan 28 17:03:03 crc kubenswrapper[4877]: I0128 17:03:03.967465 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-1655-account-create-update-vkpnq" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.700668 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-gtz9w"] Jan 28 17:03:06 crc kubenswrapper[4877]: E0128 17:03:06.701565 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8355c6a7-af56-4b68-bd65-560a99273480" containerName="heat-cfnapi" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.701587 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="8355c6a7-af56-4b68-bd65-560a99273480" containerName="heat-cfnapi" Jan 28 17:03:06 crc kubenswrapper[4877]: E0128 17:03:06.701602 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2be3e552-8300-424e-90d5-93a278509c6b" containerName="mariadb-account-create-update" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.701612 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="2be3e552-8300-424e-90d5-93a278509c6b" containerName="mariadb-account-create-update" Jan 28 17:03:06 crc kubenswrapper[4877]: E0128 17:03:06.701631 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db1f5546-0eed-4bf4-bd25-065718c91a46" containerName="heat-api" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.701639 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="db1f5546-0eed-4bf4-bd25-065718c91a46" containerName="heat-api" Jan 28 17:03:06 crc kubenswrapper[4877]: E0128 17:03:06.701673 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e828bbc2-14a9-4ffd-8867-065b294666f4" containerName="mariadb-database-create" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.701682 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="e828bbc2-14a9-4ffd-8867-065b294666f4" 
containerName="mariadb-database-create" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.701963 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="db1f5546-0eed-4bf4-bd25-065718c91a46" containerName="heat-api" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.701979 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="8355c6a7-af56-4b68-bd65-560a99273480" containerName="heat-cfnapi" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.701995 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="e828bbc2-14a9-4ffd-8867-065b294666f4" containerName="mariadb-database-create" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.702010 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="2be3e552-8300-424e-90d5-93a278509c6b" containerName="mariadb-account-create-update" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.702893 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-gtz9w" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.706023 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.706350 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-nxphm" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.706377 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.708959 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.715207 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-gtz9w"] Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.787559 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shjdc\" (UniqueName: \"kubernetes.io/projected/894b6108-3063-40a2-809a-b8f8393b3ecc-kube-api-access-shjdc\") pod \"aodh-db-sync-gtz9w\" (UID: \"894b6108-3063-40a2-809a-b8f8393b3ecc\") " pod="openstack/aodh-db-sync-gtz9w" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.787607 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-combined-ca-bundle\") pod \"aodh-db-sync-gtz9w\" (UID: \"894b6108-3063-40a2-809a-b8f8393b3ecc\") " pod="openstack/aodh-db-sync-gtz9w" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.787674 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-scripts\") pod \"aodh-db-sync-gtz9w\" (UID: \"894b6108-3063-40a2-809a-b8f8393b3ecc\") " pod="openstack/aodh-db-sync-gtz9w" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.787706 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-config-data\") pod \"aodh-db-sync-gtz9w\" (UID: \"894b6108-3063-40a2-809a-b8f8393b3ecc\") " pod="openstack/aodh-db-sync-gtz9w" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.889714 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-scripts\") pod \"aodh-db-sync-gtz9w\" (UID: \"894b6108-3063-40a2-809a-b8f8393b3ecc\") " pod="openstack/aodh-db-sync-gtz9w" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.889788 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-config-data\") pod \"aodh-db-sync-gtz9w\" (UID: \"894b6108-3063-40a2-809a-b8f8393b3ecc\") " pod="openstack/aodh-db-sync-gtz9w" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.889952 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shjdc\" (UniqueName: \"kubernetes.io/projected/894b6108-3063-40a2-809a-b8f8393b3ecc-kube-api-access-shjdc\") pod \"aodh-db-sync-gtz9w\" (UID: \"894b6108-3063-40a2-809a-b8f8393b3ecc\") " pod="openstack/aodh-db-sync-gtz9w" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.889989 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-combined-ca-bundle\") pod \"aodh-db-sync-gtz9w\" (UID: \"894b6108-3063-40a2-809a-b8f8393b3ecc\") " pod="openstack/aodh-db-sync-gtz9w" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.897051 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-scripts\") pod \"aodh-db-sync-gtz9w\" (UID: \"894b6108-3063-40a2-809a-b8f8393b3ecc\") " pod="openstack/aodh-db-sync-gtz9w" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.897715 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-combined-ca-bundle\") pod \"aodh-db-sync-gtz9w\" (UID: \"894b6108-3063-40a2-809a-b8f8393b3ecc\") " pod="openstack/aodh-db-sync-gtz9w" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.897888 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-config-data\") pod \"aodh-db-sync-gtz9w\" (UID: \"894b6108-3063-40a2-809a-b8f8393b3ecc\") " pod="openstack/aodh-db-sync-gtz9w" Jan 28 17:03:06 crc kubenswrapper[4877]: I0128 17:03:06.913163 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shjdc\" (UniqueName: \"kubernetes.io/projected/894b6108-3063-40a2-809a-b8f8393b3ecc-kube-api-access-shjdc\") pod \"aodh-db-sync-gtz9w\" (UID: \"894b6108-3063-40a2-809a-b8f8393b3ecc\") " pod="openstack/aodh-db-sync-gtz9w" Jan 28 17:03:07 crc kubenswrapper[4877]: I0128 17:03:07.004165 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 28 17:03:07 crc kubenswrapper[4877]: I0128 17:03:07.032437 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-gtz9w" Jan 28 17:03:07 crc kubenswrapper[4877]: I0128 17:03:07.342799 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:03:07 crc kubenswrapper[4877]: E0128 17:03:07.343279 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:03:07 crc kubenswrapper[4877]: I0128 17:03:07.621183 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-gtz9w"] Jan 28 17:03:08 crc kubenswrapper[4877]: I0128 17:03:08.065631 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-gtz9w" event={"ID":"894b6108-3063-40a2-809a-b8f8393b3ecc","Type":"ContainerStarted","Data":"c1bd4f7dbb879f5c3750c82b453f87f34ce1c09c1e87f04f908b48b8455b71d1"} Jan 28 17:03:13 crc kubenswrapper[4877]: E0128 17:03:13.860212 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8355c6a7_af56_4b68_bd65_560a99273480.slice/crio-888f1b55fc6b7fd67c0a4b1f09a42575075e30cb1a53082038275a86d8587c48\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode828bbc2_14a9_4ffd_8867_065b294666f4.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode828bbc2_14a9_4ffd_8867_065b294666f4.slice/crio-e2f0b0f73d787cf131e9f50397eb44eae588462fed100f92e4deff34e544b8fe\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode828bbc2_14a9_4ffd_8867_065b294666f4.slice/crio-conmon-6cd98f618fe3677e799bab882f16f43fe0eec5ae61f33694d156a073118aca58.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8355c6a7_af56_4b68_bd65_560a99273480.slice/crio-conmon-27a6b7d4000d41610f5d798276ad6c12b77825333cc2be8a645d4374f57c34d8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb1f5546_0eed_4bf4_bd25_065718c91a46.slice/crio-conmon-9221b44fad16ae9be88c646cb760d9c38358bb8592f689a92da95e8627118c98.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb1f5546_0eed_4bf4_bd25_065718c91a46.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2be3e552_8300_424e_90d5_93a278509c6b.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2be3e552_8300_424e_90d5_93a278509c6b.slice/crio-8522bc62116f77e7bc8d9ade497fa61a94dcb3080f96fd6991be21563e27d764\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb1f5546_0eed_4bf4_bd25_065718c91a46.slice/crio-dcde4862f2dfde1457c90c189c7a6f2913426d4c6f866e4a8e2a8d2c3192910b\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c0b5629_09cf_4084_ba92_2f2c6a09921f.slice/crio-fab1f412dd4a7b09686c06672b52f2bfd683df0f5924c0a3986c3652d51bf948.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8355c6a7_af56_4b68_bd65_560a99273480.slice/crio-27a6b7d4000d41610f5d798276ad6c12b77825333cc2be8a645d4374f57c34d8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb1f5546_0eed_4bf4_bd25_065718c91a46.slice/crio-9221b44fad16ae9be88c646cb760d9c38358bb8592f689a92da95e8627118c98.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode828bbc2_14a9_4ffd_8867_065b294666f4.slice/crio-6cd98f618fe3677e799bab882f16f43fe0eec5ae61f33694d156a073118aca58.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c0b5629_09cf_4084_ba92_2f2c6a09921f.slice/crio-conmon-fab1f412dd4a7b09686c06672b52f2bfd683df0f5924c0a3986c3652d51bf948.scope\": RecentStats: unable to find data in memory cache]" Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.194773 4877 generic.go:334] "Generic (PLEG): container finished" podID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerID="fab1f412dd4a7b09686c06672b52f2bfd683df0f5924c0a3986c3652d51bf948" exitCode=137 Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.195101 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3c0b5629-09cf-4084-ba92-2f2c6a09921f","Type":"ContainerDied","Data":"fab1f412dd4a7b09686c06672b52f2bfd683df0f5924c0a3986c3652d51bf948"} Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.688907 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.769400 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-combined-ca-bundle\") pod \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.769488 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-config-data\") pod \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.769625 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-sg-core-conf-yaml\") pod \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.769736 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sc2pm\" (UniqueName: \"kubernetes.io/projected/3c0b5629-09cf-4084-ba92-2f2c6a09921f-kube-api-access-sc2pm\") pod \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.769866 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-scripts\") pod \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.769956 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3c0b5629-09cf-4084-ba92-2f2c6a09921f-log-httpd\") pod \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.770042 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3c0b5629-09cf-4084-ba92-2f2c6a09921f-run-httpd\") pod \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\" (UID: \"3c0b5629-09cf-4084-ba92-2f2c6a09921f\") " Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.784269 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c0b5629-09cf-4084-ba92-2f2c6a09921f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3c0b5629-09cf-4084-ba92-2f2c6a09921f" (UID: "3c0b5629-09cf-4084-ba92-2f2c6a09921f"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.787880 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c0b5629-09cf-4084-ba92-2f2c6a09921f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3c0b5629-09cf-4084-ba92-2f2c6a09921f" (UID: "3c0b5629-09cf-4084-ba92-2f2c6a09921f"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.798778 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-scripts" (OuterVolumeSpecName: "scripts") pod "3c0b5629-09cf-4084-ba92-2f2c6a09921f" (UID: "3c0b5629-09cf-4084-ba92-2f2c6a09921f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.802816 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c0b5629-09cf-4084-ba92-2f2c6a09921f-kube-api-access-sc2pm" (OuterVolumeSpecName: "kube-api-access-sc2pm") pod "3c0b5629-09cf-4084-ba92-2f2c6a09921f" (UID: "3c0b5629-09cf-4084-ba92-2f2c6a09921f"). InnerVolumeSpecName "kube-api-access-sc2pm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.829716 4877 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3c0b5629-09cf-4084-ba92-2f2c6a09921f-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.829795 4877 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3c0b5629-09cf-4084-ba92-2f2c6a09921f-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.829813 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sc2pm\" (UniqueName: \"kubernetes.io/projected/3c0b5629-09cf-4084-ba92-2f2c6a09921f-kube-api-access-sc2pm\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.829832 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.876409 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3c0b5629-09cf-4084-ba92-2f2c6a09921f" (UID: "3c0b5629-09cf-4084-ba92-2f2c6a09921f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.932280 4877 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.949190 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3c0b5629-09cf-4084-ba92-2f2c6a09921f" (UID: "3c0b5629-09cf-4084-ba92-2f2c6a09921f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:14 crc kubenswrapper[4877]: I0128 17:03:14.959273 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-config-data" (OuterVolumeSpecName: "config-data") pod "3c0b5629-09cf-4084-ba92-2f2c6a09921f" (UID: "3c0b5629-09cf-4084-ba92-2f2c6a09921f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.035078 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.035133 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c0b5629-09cf-4084-ba92-2f2c6a09921f-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.215217 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3c0b5629-09cf-4084-ba92-2f2c6a09921f","Type":"ContainerDied","Data":"3ee42c151eb70dab4868c2dc0e6e95d90359ece88a61c79fcaec14285c441324"} Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.215764 4877 scope.go:117] "RemoveContainer" containerID="fab1f412dd4a7b09686c06672b52f2bfd683df0f5924c0a3986c3652d51bf948" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.215308 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.225597 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-gtz9w" event={"ID":"894b6108-3063-40a2-809a-b8f8393b3ecc","Type":"ContainerStarted","Data":"fd0204b269633a3159eee494d9bcdccef134ea300f9c1ebc5cc001c7a7209c49"} Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.253960 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-gtz9w" podStartSLOduration=2.3876107060000002 podStartE2EDuration="9.25392628s" podCreationTimestamp="2026-01-28 17:03:06 +0000 UTC" firstStartedPulling="2026-01-28 17:03:07.619550335 +0000 UTC m=+1691.177877223" lastFinishedPulling="2026-01-28 17:03:14.485865899 +0000 UTC m=+1698.044192797" observedRunningTime="2026-01-28 17:03:15.249654785 +0000 UTC m=+1698.807981683" watchObservedRunningTime="2026-01-28 17:03:15.25392628 +0000 UTC m=+1698.812253168" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.279895 4877 scope.go:117] "RemoveContainer" containerID="9a1e999bfd48c7a5ccb67a01f7dd434077c05ce67b67e067d8b5cf1abdcb5535" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.314279 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.340029 4877 scope.go:117] "RemoveContainer" containerID="1a59aec1e4b9d26bd5aee2d799f2b56e098a26076fdfa7a2de596c7ebe98501b" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.363136 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.368392 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:03:15 crc kubenswrapper[4877]: E0128 17:03:15.372505 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerName="sg-core" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.372562 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerName="sg-core" Jan 28 17:03:15 crc kubenswrapper[4877]: E0128 17:03:15.372619 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerName="proxy-httpd" Jan 28 
Jan 28 17:03:15 crc kubenswrapper[4877]: E0128 17:03:15.372657 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerName="ceilometer-central-agent"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.372664 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerName="ceilometer-central-agent"
Jan 28 17:03:15 crc kubenswrapper[4877]: E0128 17:03:15.372702 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerName="ceilometer-notification-agent"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.372710 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerName="ceilometer-notification-agent"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.373318 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerName="ceilometer-central-agent"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.373340 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerName="ceilometer-notification-agent"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.373353 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerName="proxy-httpd"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.373365 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" containerName="sg-core"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.376229 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.378948 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.379250 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.383694 4877 scope.go:117] "RemoveContainer" containerID="1e8ddf2b35adc322d509b626eb851a7fd8a4601ef19c1e40942a613d49a83c4e"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.386773 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.443983 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.444054 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67eb2c94-ae13-4c83-9344-bfe125a62267-run-httpd\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.444123 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-scripts\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.444306 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67eb2c94-ae13-4c83-9344-bfe125a62267-log-httpd\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.444630 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjjth\" (UniqueName: \"kubernetes.io/projected/67eb2c94-ae13-4c83-9344-bfe125a62267-kube-api-access-gjjth\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.444858 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-config-data\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.444915 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0"
Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.549820 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-config-data\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0"
\"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-config-data\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.549883 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.549972 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.549989 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67eb2c94-ae13-4c83-9344-bfe125a62267-run-httpd\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.550038 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-scripts\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.550093 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67eb2c94-ae13-4c83-9344-bfe125a62267-log-httpd\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.550167 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjjth\" (UniqueName: \"kubernetes.io/projected/67eb2c94-ae13-4c83-9344-bfe125a62267-kube-api-access-gjjth\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.552110 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67eb2c94-ae13-4c83-9344-bfe125a62267-run-httpd\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.554254 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67eb2c94-ae13-4c83-9344-bfe125a62267-log-httpd\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.556294 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-scripts\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.556671 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-config-data\") 
pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.557053 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.562843 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.568701 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjjth\" (UniqueName: \"kubernetes.io/projected/67eb2c94-ae13-4c83-9344-bfe125a62267-kube-api-access-gjjth\") pod \"ceilometer-0\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " pod="openstack/ceilometer-0" Jan 28 17:03:15 crc kubenswrapper[4877]: I0128 17:03:15.709375 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:03:16 crc kubenswrapper[4877]: W0128 17:03:16.277769 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod67eb2c94_ae13_4c83_9344_bfe125a62267.slice/crio-af28dc0c398f81e056a2efe9b00ad7dd5e37e089a313e79f28d40a62c9842be3 WatchSource:0}: Error finding container af28dc0c398f81e056a2efe9b00ad7dd5e37e089a313e79f28d40a62c9842be3: Status 404 returned error can't find the container with id af28dc0c398f81e056a2efe9b00ad7dd5e37e089a313e79f28d40a62c9842be3 Jan 28 17:03:16 crc kubenswrapper[4877]: I0128 17:03:16.289624 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:03:17 crc kubenswrapper[4877]: I0128 17:03:17.256674 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67eb2c94-ae13-4c83-9344-bfe125a62267","Type":"ContainerStarted","Data":"af28dc0c398f81e056a2efe9b00ad7dd5e37e089a313e79f28d40a62c9842be3"} Jan 28 17:03:17 crc kubenswrapper[4877]: I0128 17:03:17.350389 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c0b5629-09cf-4084-ba92-2f2c6a09921f" path="/var/lib/kubelet/pods/3c0b5629-09cf-4084-ba92-2f2c6a09921f/volumes" Jan 28 17:03:18 crc kubenswrapper[4877]: I0128 17:03:18.270872 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67eb2c94-ae13-4c83-9344-bfe125a62267","Type":"ContainerStarted","Data":"8b419eaa0a496a37f2f9cb39c4fdd94f1f3a2d919221c36ac37f68698eb9f4fe"} Jan 28 17:03:19 crc kubenswrapper[4877]: I0128 17:03:19.342822 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:03:19 crc kubenswrapper[4877]: E0128 17:03:19.343309 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 
Jan 28 17:03:22 crc kubenswrapper[4877]: I0128 17:03:22.322170 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67eb2c94-ae13-4c83-9344-bfe125a62267","Type":"ContainerStarted","Data":"6c297981962d34967bc4fc0b1bebc07b2dad63ab349af54ddf16af5e3c869dc9"}
Jan 28 17:03:23 crc kubenswrapper[4877]: I0128 17:03:23.335454 4877 generic.go:334] "Generic (PLEG): container finished" podID="894b6108-3063-40a2-809a-b8f8393b3ecc" containerID="fd0204b269633a3159eee494d9bcdccef134ea300f9c1ebc5cc001c7a7209c49" exitCode=0
Jan 28 17:03:23 crc kubenswrapper[4877]: I0128 17:03:23.346568 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-gtz9w" event={"ID":"894b6108-3063-40a2-809a-b8f8393b3ecc","Type":"ContainerDied","Data":"fd0204b269633a3159eee494d9bcdccef134ea300f9c1ebc5cc001c7a7209c49"}
Jan 28 17:03:24 crc kubenswrapper[4877]: I0128 17:03:24.797024 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-gtz9w"
Jan 28 17:03:24 crc kubenswrapper[4877]: I0128 17:03:24.907014 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-shjdc\" (UniqueName: \"kubernetes.io/projected/894b6108-3063-40a2-809a-b8f8393b3ecc-kube-api-access-shjdc\") pod \"894b6108-3063-40a2-809a-b8f8393b3ecc\" (UID: \"894b6108-3063-40a2-809a-b8f8393b3ecc\") "
Jan 28 17:03:24 crc kubenswrapper[4877]: I0128 17:03:24.907113 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-scripts\") pod \"894b6108-3063-40a2-809a-b8f8393b3ecc\" (UID: \"894b6108-3063-40a2-809a-b8f8393b3ecc\") "
Jan 28 17:03:24 crc kubenswrapper[4877]: I0128 17:03:24.907164 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-config-data\") pod \"894b6108-3063-40a2-809a-b8f8393b3ecc\" (UID: \"894b6108-3063-40a2-809a-b8f8393b3ecc\") "
Jan 28 17:03:24 crc kubenswrapper[4877]: I0128 17:03:24.907278 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-combined-ca-bundle\") pod \"894b6108-3063-40a2-809a-b8f8393b3ecc\" (UID: \"894b6108-3063-40a2-809a-b8f8393b3ecc\") "
Jan 28 17:03:24 crc kubenswrapper[4877]: I0128 17:03:24.912419 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-scripts" (OuterVolumeSpecName: "scripts") pod "894b6108-3063-40a2-809a-b8f8393b3ecc" (UID: "894b6108-3063-40a2-809a-b8f8393b3ecc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:03:24 crc kubenswrapper[4877]: I0128 17:03:24.912751 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/894b6108-3063-40a2-809a-b8f8393b3ecc-kube-api-access-shjdc" (OuterVolumeSpecName: "kube-api-access-shjdc") pod "894b6108-3063-40a2-809a-b8f8393b3ecc" (UID: "894b6108-3063-40a2-809a-b8f8393b3ecc"). InnerVolumeSpecName "kube-api-access-shjdc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:03:24 crc kubenswrapper[4877]: I0128 17:03:24.949997 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-config-data" (OuterVolumeSpecName: "config-data") pod "894b6108-3063-40a2-809a-b8f8393b3ecc" (UID: "894b6108-3063-40a2-809a-b8f8393b3ecc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:03:24 crc kubenswrapper[4877]: I0128 17:03:24.950840 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "894b6108-3063-40a2-809a-b8f8393b3ecc" (UID: "894b6108-3063-40a2-809a-b8f8393b3ecc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:03:25 crc kubenswrapper[4877]: I0128 17:03:25.010014 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-shjdc\" (UniqueName: \"kubernetes.io/projected/894b6108-3063-40a2-809a-b8f8393b3ecc-kube-api-access-shjdc\") on node \"crc\" DevicePath \"\""
Jan 28 17:03:25 crc kubenswrapper[4877]: I0128 17:03:25.010054 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 17:03:25 crc kubenswrapper[4877]: I0128 17:03:25.010067 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 17:03:25 crc kubenswrapper[4877]: I0128 17:03:25.010078 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/894b6108-3063-40a2-809a-b8f8393b3ecc-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 17:03:25 crc kubenswrapper[4877]: I0128 17:03:25.367449 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67eb2c94-ae13-4c83-9344-bfe125a62267","Type":"ContainerStarted","Data":"f8da18adc6f257ab1290f60598e29e603cbf9a88466b4b861b741f049e8d9e5e"}
Jan 28 17:03:25 crc kubenswrapper[4877]: I0128 17:03:25.367762 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 28 17:03:25 crc kubenswrapper[4877]: I0128 17:03:25.370365 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-gtz9w" event={"ID":"894b6108-3063-40a2-809a-b8f8393b3ecc","Type":"ContainerDied","Data":"c1bd4f7dbb879f5c3750c82b453f87f34ce1c09c1e87f04f908b48b8455b71d1"}
Jan 28 17:03:25 crc kubenswrapper[4877]: I0128 17:03:25.370408 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c1bd4f7dbb879f5c3750c82b453f87f34ce1c09c1e87f04f908b48b8455b71d1"
Jan 28 17:03:25 crc kubenswrapper[4877]: I0128 17:03:25.370531 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-gtz9w"
Need to start a new one" pod="openstack/aodh-db-sync-gtz9w" Jan 28 17:03:25 crc kubenswrapper[4877]: I0128 17:03:25.401887 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.10089089 podStartE2EDuration="10.401868361s" podCreationTimestamp="2026-01-28 17:03:15 +0000 UTC" firstStartedPulling="2026-01-28 17:03:16.280593441 +0000 UTC m=+1699.838920329" lastFinishedPulling="2026-01-28 17:03:24.581570912 +0000 UTC m=+1708.139897800" observedRunningTime="2026-01-28 17:03:25.387674048 +0000 UTC m=+1708.946000936" watchObservedRunningTime="2026-01-28 17:03:25.401868361 +0000 UTC m=+1708.960195249" Jan 28 17:03:26 crc kubenswrapper[4877]: I0128 17:03:26.840593 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Jan 28 17:03:26 crc kubenswrapper[4877]: E0128 17:03:26.841446 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="894b6108-3063-40a2-809a-b8f8393b3ecc" containerName="aodh-db-sync" Jan 28 17:03:26 crc kubenswrapper[4877]: I0128 17:03:26.841464 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="894b6108-3063-40a2-809a-b8f8393b3ecc" containerName="aodh-db-sync" Jan 28 17:03:26 crc kubenswrapper[4877]: I0128 17:03:26.841790 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="894b6108-3063-40a2-809a-b8f8393b3ecc" containerName="aodh-db-sync" Jan 28 17:03:26 crc kubenswrapper[4877]: I0128 17:03:26.854791 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 28 17:03:26 crc kubenswrapper[4877]: I0128 17:03:26.859947 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 28 17:03:26 crc kubenswrapper[4877]: I0128 17:03:26.860246 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 28 17:03:26 crc kubenswrapper[4877]: I0128 17:03:26.860542 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-nxphm" Jan 28 17:03:26 crc kubenswrapper[4877]: I0128 17:03:26.875857 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 28 17:03:26 crc kubenswrapper[4877]: I0128 17:03:26.958438 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-scripts\") pod \"aodh-0\" (UID: \"04e4e30f-0823-4599-b3c3-b71a8630547f\") " pod="openstack/aodh-0" Jan 28 17:03:26 crc kubenswrapper[4877]: I0128 17:03:26.958575 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-config-data\") pod \"aodh-0\" (UID: \"04e4e30f-0823-4599-b3c3-b71a8630547f\") " pod="openstack/aodh-0" Jan 28 17:03:26 crc kubenswrapper[4877]: I0128 17:03:26.958636 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-combined-ca-bundle\") pod \"aodh-0\" (UID: \"04e4e30f-0823-4599-b3c3-b71a8630547f\") " pod="openstack/aodh-0" Jan 28 17:03:26 crc kubenswrapper[4877]: I0128 17:03:26.958793 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcnzd\" (UniqueName: 
\"kubernetes.io/projected/04e4e30f-0823-4599-b3c3-b71a8630547f-kube-api-access-bcnzd\") pod \"aodh-0\" (UID: \"04e4e30f-0823-4599-b3c3-b71a8630547f\") " pod="openstack/aodh-0" Jan 28 17:03:27 crc kubenswrapper[4877]: I0128 17:03:27.060742 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-config-data\") pod \"aodh-0\" (UID: \"04e4e30f-0823-4599-b3c3-b71a8630547f\") " pod="openstack/aodh-0" Jan 28 17:03:27 crc kubenswrapper[4877]: I0128 17:03:27.060867 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-combined-ca-bundle\") pod \"aodh-0\" (UID: \"04e4e30f-0823-4599-b3c3-b71a8630547f\") " pod="openstack/aodh-0" Jan 28 17:03:27 crc kubenswrapper[4877]: I0128 17:03:27.061044 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcnzd\" (UniqueName: \"kubernetes.io/projected/04e4e30f-0823-4599-b3c3-b71a8630547f-kube-api-access-bcnzd\") pod \"aodh-0\" (UID: \"04e4e30f-0823-4599-b3c3-b71a8630547f\") " pod="openstack/aodh-0" Jan 28 17:03:27 crc kubenswrapper[4877]: I0128 17:03:27.061096 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-scripts\") pod \"aodh-0\" (UID: \"04e4e30f-0823-4599-b3c3-b71a8630547f\") " pod="openstack/aodh-0" Jan 28 17:03:27 crc kubenswrapper[4877]: I0128 17:03:27.068608 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-config-data\") pod \"aodh-0\" (UID: \"04e4e30f-0823-4599-b3c3-b71a8630547f\") " pod="openstack/aodh-0" Jan 28 17:03:27 crc kubenswrapper[4877]: I0128 17:03:27.072856 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-combined-ca-bundle\") pod \"aodh-0\" (UID: \"04e4e30f-0823-4599-b3c3-b71a8630547f\") " pod="openstack/aodh-0" Jan 28 17:03:27 crc kubenswrapper[4877]: I0128 17:03:27.077935 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-scripts\") pod \"aodh-0\" (UID: \"04e4e30f-0823-4599-b3c3-b71a8630547f\") " pod="openstack/aodh-0" Jan 28 17:03:27 crc kubenswrapper[4877]: I0128 17:03:27.083206 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcnzd\" (UniqueName: \"kubernetes.io/projected/04e4e30f-0823-4599-b3c3-b71a8630547f-kube-api-access-bcnzd\") pod \"aodh-0\" (UID: \"04e4e30f-0823-4599-b3c3-b71a8630547f\") " pod="openstack/aodh-0" Jan 28 17:03:27 crc kubenswrapper[4877]: I0128 17:03:27.183407 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 28 17:03:27 crc kubenswrapper[4877]: I0128 17:03:27.772296 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 28 17:03:27 crc kubenswrapper[4877]: W0128 17:03:27.784300 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod04e4e30f_0823_4599_b3c3_b71a8630547f.slice/crio-6fedd884eba7d7495b298097d774d64057bc01b05585ce4f3a7fc8446abacab5 WatchSource:0}: Error finding container 6fedd884eba7d7495b298097d774d64057bc01b05585ce4f3a7fc8446abacab5: Status 404 returned error can't find the container with id 6fedd884eba7d7495b298097d774d64057bc01b05585ce4f3a7fc8446abacab5 Jan 28 17:03:28 crc kubenswrapper[4877]: I0128 17:03:28.409837 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"04e4e30f-0823-4599-b3c3-b71a8630547f","Type":"ContainerStarted","Data":"6fedd884eba7d7495b298097d774d64057bc01b05585ce4f3a7fc8446abacab5"} Jan 28 17:03:29 crc kubenswrapper[4877]: I0128 17:03:29.422751 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"04e4e30f-0823-4599-b3c3-b71a8630547f","Type":"ContainerStarted","Data":"c253ce01bf3aa86a94d013e3a7fac2725ea4e3bd96ed54d591dc9dde378f6012"} Jan 28 17:03:30 crc kubenswrapper[4877]: I0128 17:03:30.175209 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 28 17:03:30 crc kubenswrapper[4877]: I0128 17:03:30.680865 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:03:30 crc kubenswrapper[4877]: I0128 17:03:30.681398 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerName="ceilometer-central-agent" containerID="cri-o://8b419eaa0a496a37f2f9cb39c4fdd94f1f3a2d919221c36ac37f68698eb9f4fe" gracePeriod=30 Jan 28 17:03:30 crc kubenswrapper[4877]: I0128 17:03:30.682048 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerName="proxy-httpd" containerID="cri-o://f8da18adc6f257ab1290f60598e29e603cbf9a88466b4b861b741f049e8d9e5e" gracePeriod=30 Jan 28 17:03:30 crc kubenswrapper[4877]: I0128 17:03:30.682152 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerName="sg-core" containerID="cri-o://6c297981962d34967bc4fc0b1bebc07b2dad63ab349af54ddf16af5e3c869dc9" gracePeriod=30 Jan 28 17:03:30 crc kubenswrapper[4877]: I0128 17:03:30.682199 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerName="ceilometer-notification-agent" containerID="cri-o://9553c723a79386531c541f329309a82c12c77f04092420f1c03ea377ee5af7fa" gracePeriod=30 Jan 28 17:03:31 crc kubenswrapper[4877]: I0128 17:03:31.449039 4877 generic.go:334] "Generic (PLEG): container finished" podID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerID="f8da18adc6f257ab1290f60598e29e603cbf9a88466b4b861b741f049e8d9e5e" exitCode=0 Jan 28 17:03:31 crc kubenswrapper[4877]: I0128 17:03:31.449602 4877 generic.go:334] "Generic (PLEG): container finished" podID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerID="6c297981962d34967bc4fc0b1bebc07b2dad63ab349af54ddf16af5e3c869dc9" exitCode=2 Jan 28 17:03:31 crc 
kubenswrapper[4877]: I0128 17:03:31.449619 4877 generic.go:334] "Generic (PLEG): container finished" podID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerID="8b419eaa0a496a37f2f9cb39c4fdd94f1f3a2d919221c36ac37f68698eb9f4fe" exitCode=0 Jan 28 17:03:31 crc kubenswrapper[4877]: I0128 17:03:31.449196 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67eb2c94-ae13-4c83-9344-bfe125a62267","Type":"ContainerDied","Data":"f8da18adc6f257ab1290f60598e29e603cbf9a88466b4b861b741f049e8d9e5e"} Jan 28 17:03:31 crc kubenswrapper[4877]: I0128 17:03:31.449710 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67eb2c94-ae13-4c83-9344-bfe125a62267","Type":"ContainerDied","Data":"6c297981962d34967bc4fc0b1bebc07b2dad63ab349af54ddf16af5e3c869dc9"} Jan 28 17:03:31 crc kubenswrapper[4877]: I0128 17:03:31.449732 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67eb2c94-ae13-4c83-9344-bfe125a62267","Type":"ContainerDied","Data":"8b419eaa0a496a37f2f9cb39c4fdd94f1f3a2d919221c36ac37f68698eb9f4fe"} Jan 28 17:03:31 crc kubenswrapper[4877]: I0128 17:03:31.452268 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"04e4e30f-0823-4599-b3c3-b71a8630547f","Type":"ContainerStarted","Data":"260d70b88dfb5bc985a90b895c2fad459081454e9c8300e287c7fb1511a4c6bc"} Jan 28 17:03:32 crc kubenswrapper[4877]: I0128 17:03:32.467911 4877 generic.go:334] "Generic (PLEG): container finished" podID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerID="9553c723a79386531c541f329309a82c12c77f04092420f1c03ea377ee5af7fa" exitCode=0 Jan 28 17:03:32 crc kubenswrapper[4877]: I0128 17:03:32.468187 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67eb2c94-ae13-4c83-9344-bfe125a62267","Type":"ContainerDied","Data":"9553c723a79386531c541f329309a82c12c77f04092420f1c03ea377ee5af7fa"} Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.291724 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.332670 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:03:33 crc kubenswrapper[4877]: E0128 17:03:33.333313 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.420232 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-scripts\") pod \"67eb2c94-ae13-4c83-9344-bfe125a62267\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.420592 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-sg-core-conf-yaml\") pod \"67eb2c94-ae13-4c83-9344-bfe125a62267\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.420790 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67eb2c94-ae13-4c83-9344-bfe125a62267-run-httpd\") pod \"67eb2c94-ae13-4c83-9344-bfe125a62267\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.420865 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-combined-ca-bundle\") pod \"67eb2c94-ae13-4c83-9344-bfe125a62267\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.420903 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-config-data\") pod \"67eb2c94-ae13-4c83-9344-bfe125a62267\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.421001 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67eb2c94-ae13-4c83-9344-bfe125a62267-log-httpd\") pod \"67eb2c94-ae13-4c83-9344-bfe125a62267\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.421082 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjjth\" (UniqueName: \"kubernetes.io/projected/67eb2c94-ae13-4c83-9344-bfe125a62267-kube-api-access-gjjth\") pod \"67eb2c94-ae13-4c83-9344-bfe125a62267\" (UID: \"67eb2c94-ae13-4c83-9344-bfe125a62267\") " Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.424035 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67eb2c94-ae13-4c83-9344-bfe125a62267-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "67eb2c94-ae13-4c83-9344-bfe125a62267" (UID: "67eb2c94-ae13-4c83-9344-bfe125a62267"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.425052 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67eb2c94-ae13-4c83-9344-bfe125a62267-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "67eb2c94-ae13-4c83-9344-bfe125a62267" (UID: "67eb2c94-ae13-4c83-9344-bfe125a62267"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.427864 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67eb2c94-ae13-4c83-9344-bfe125a62267-kube-api-access-gjjth" (OuterVolumeSpecName: "kube-api-access-gjjth") pod "67eb2c94-ae13-4c83-9344-bfe125a62267" (UID: "67eb2c94-ae13-4c83-9344-bfe125a62267"). InnerVolumeSpecName "kube-api-access-gjjth". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.432918 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-scripts" (OuterVolumeSpecName: "scripts") pod "67eb2c94-ae13-4c83-9344-bfe125a62267" (UID: "67eb2c94-ae13-4c83-9344-bfe125a62267"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.454368 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "67eb2c94-ae13-4c83-9344-bfe125a62267" (UID: "67eb2c94-ae13-4c83-9344-bfe125a62267"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.483774 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"67eb2c94-ae13-4c83-9344-bfe125a62267","Type":"ContainerDied","Data":"af28dc0c398f81e056a2efe9b00ad7dd5e37e089a313e79f28d40a62c9842be3"} Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.483859 4877 scope.go:117] "RemoveContainer" containerID="f8da18adc6f257ab1290f60598e29e603cbf9a88466b4b861b741f049e8d9e5e" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.483868 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.524820 4877 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67eb2c94-ae13-4c83-9344-bfe125a62267-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.524851 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjjth\" (UniqueName: \"kubernetes.io/projected/67eb2c94-ae13-4c83-9344-bfe125a62267-kube-api-access-gjjth\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.524863 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.524873 4877 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.524883 4877 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/67eb2c94-ae13-4c83-9344-bfe125a62267-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.527112 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "67eb2c94-ae13-4c83-9344-bfe125a62267" (UID: "67eb2c94-ae13-4c83-9344-bfe125a62267"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.545394 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-config-data" (OuterVolumeSpecName: "config-data") pod "67eb2c94-ae13-4c83-9344-bfe125a62267" (UID: "67eb2c94-ae13-4c83-9344-bfe125a62267"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.627578 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.627892 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67eb2c94-ae13-4c83-9344-bfe125a62267-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.814825 4877 scope.go:117] "RemoveContainer" containerID="6c297981962d34967bc4fc0b1bebc07b2dad63ab349af54ddf16af5e3c869dc9" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.818752 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.889675 4877 scope.go:117] "RemoveContainer" containerID="9553c723a79386531c541f329309a82c12c77f04092420f1c03ea377ee5af7fa" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.903716 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.916743 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:03:33 crc kubenswrapper[4877]: E0128 17:03:33.917217 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerName="sg-core" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.917236 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerName="sg-core" Jan 28 17:03:33 crc kubenswrapper[4877]: E0128 17:03:33.917255 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerName="ceilometer-central-agent" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.917262 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerName="ceilometer-central-agent" Jan 28 17:03:33 crc kubenswrapper[4877]: E0128 17:03:33.917276 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerName="proxy-httpd" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.917282 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerName="proxy-httpd" Jan 28 17:03:33 crc kubenswrapper[4877]: E0128 17:03:33.917309 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerName="ceilometer-notification-agent" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.917316 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerName="ceilometer-notification-agent" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.917568 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerName="sg-core" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.917595 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerName="ceilometer-central-agent" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.917610 4877 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerName="proxy-httpd" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.917618 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" containerName="ceilometer-notification-agent" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.919899 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.921806 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.922768 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.928450 4877 scope.go:117] "RemoveContainer" containerID="8b419eaa0a496a37f2f9cb39c4fdd94f1f3a2d919221c36ac37f68698eb9f4fe" Jan 28 17:03:33 crc kubenswrapper[4877]: I0128 17:03:33.933027 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.046271 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/944a2fc1-d799-4137-8b34-fd0d76278dee-log-httpd\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.046392 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/944a2fc1-d799-4137-8b34-fd0d76278dee-run-httpd\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.046467 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-config-data\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.046615 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.046671 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7sm9\" (UniqueName: \"kubernetes.io/projected/944a2fc1-d799-4137-8b34-fd0d76278dee-kube-api-access-c7sm9\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.046718 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-scripts\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.047138 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.148947 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.149007 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/944a2fc1-d799-4137-8b34-fd0d76278dee-log-httpd\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.149043 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/944a2fc1-d799-4137-8b34-fd0d76278dee-run-httpd\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.149076 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-config-data\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.149119 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.149144 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7sm9\" (UniqueName: \"kubernetes.io/projected/944a2fc1-d799-4137-8b34-fd0d76278dee-kube-api-access-c7sm9\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.149167 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-scripts\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.149608 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/944a2fc1-d799-4137-8b34-fd0d76278dee-log-httpd\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.149611 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/944a2fc1-d799-4137-8b34-fd0d76278dee-run-httpd\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.153873 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.153890 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-scripts\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.154172 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.154737 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-config-data\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.165051 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7sm9\" (UniqueName: \"kubernetes.io/projected/944a2fc1-d799-4137-8b34-fd0d76278dee-kube-api-access-c7sm9\") pod \"ceilometer-0\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.238616 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:03:34 crc kubenswrapper[4877]: I0128 17:03:34.837734 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:03:35 crc kubenswrapper[4877]: I0128 17:03:35.346085 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67eb2c94-ae13-4c83-9344-bfe125a62267" path="/var/lib/kubelet/pods/67eb2c94-ae13-4c83-9344-bfe125a62267/volumes" Jan 28 17:03:35 crc kubenswrapper[4877]: I0128 17:03:35.491350 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:03:35 crc kubenswrapper[4877]: I0128 17:03:35.522046 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"944a2fc1-d799-4137-8b34-fd0d76278dee","Type":"ContainerStarted","Data":"52a5321e9afac6fcad92118b0802a832fb65528964aec16a89d11cf112d7ce10"} Jan 28 17:03:35 crc kubenswrapper[4877]: I0128 17:03:35.524246 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"04e4e30f-0823-4599-b3c3-b71a8630547f","Type":"ContainerStarted","Data":"f30427682927d6697d795ec003cd86c1773d146d41690818787e684314926fef"} Jan 28 17:03:36 crc kubenswrapper[4877]: I0128 17:03:36.537785 4877 generic.go:334] "Generic (PLEG): container finished" podID="d0456757-a0e3-42a7-900f-422828fe9836" containerID="280d1e05976bf316ebf7fa080e91b03cd06512ba323bcc8e9eb0e6c4179b8f80" exitCode=0 Jan 28 17:03:36 crc kubenswrapper[4877]: I0128 17:03:36.538348 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-j4pm4" event={"ID":"d0456757-a0e3-42a7-900f-422828fe9836","Type":"ContainerDied","Data":"280d1e05976bf316ebf7fa080e91b03cd06512ba323bcc8e9eb0e6c4179b8f80"} Jan 28 17:03:36 crc kubenswrapper[4877]: I0128 17:03:36.557717 4877 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/ceilometer-0" event={"ID":"944a2fc1-d799-4137-8b34-fd0d76278dee","Type":"ContainerStarted","Data":"b268825525e642fd2f91a075887ca7807113d3761003a53ec903d280e8b37b17"} Jan 28 17:03:37 crc kubenswrapper[4877]: I0128 17:03:37.593206 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"04e4e30f-0823-4599-b3c3-b71a8630547f","Type":"ContainerStarted","Data":"485ac4ec342e90aa666f546807af9256ea9d496778c138bbd9f263e9a9c6cb50"} Jan 28 17:03:37 crc kubenswrapper[4877]: I0128 17:03:37.593510 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerName="aodh-api" containerID="cri-o://c253ce01bf3aa86a94d013e3a7fac2725ea4e3bd96ed54d591dc9dde378f6012" gracePeriod=30 Jan 28 17:03:37 crc kubenswrapper[4877]: I0128 17:03:37.593559 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerName="aodh-listener" containerID="cri-o://485ac4ec342e90aa666f546807af9256ea9d496778c138bbd9f263e9a9c6cb50" gracePeriod=30 Jan 28 17:03:37 crc kubenswrapper[4877]: I0128 17:03:37.593598 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerName="aodh-notifier" containerID="cri-o://f30427682927d6697d795ec003cd86c1773d146d41690818787e684314926fef" gracePeriod=30 Jan 28 17:03:37 crc kubenswrapper[4877]: I0128 17:03:37.593644 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerName="aodh-evaluator" containerID="cri-o://260d70b88dfb5bc985a90b895c2fad459081454e9c8300e287c7fb1511a4c6bc" gracePeriod=30 Jan 28 17:03:37 crc kubenswrapper[4877]: I0128 17:03:37.600509 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"944a2fc1-d799-4137-8b34-fd0d76278dee","Type":"ContainerStarted","Data":"405183ef079e6231df0ab68ae0a0cc9f0c53714eebade06a2add92e3f958da76"} Jan 28 17:03:37 crc kubenswrapper[4877]: I0128 17:03:37.615222 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.834688893 podStartE2EDuration="11.615203888s" podCreationTimestamp="2026-01-28 17:03:26 +0000 UTC" firstStartedPulling="2026-01-28 17:03:27.787036417 +0000 UTC m=+1711.345363305" lastFinishedPulling="2026-01-28 17:03:36.567551412 +0000 UTC m=+1720.125878300" observedRunningTime="2026-01-28 17:03:37.613963775 +0000 UTC m=+1721.172290663" watchObservedRunningTime="2026-01-28 17:03:37.615203888 +0000 UTC m=+1721.173530766" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.315828 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-j4pm4" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.407124 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-scripts\") pod \"d0456757-a0e3-42a7-900f-422828fe9836\" (UID: \"d0456757-a0e3-42a7-900f-422828fe9836\") " Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.407190 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-combined-ca-bundle\") pod \"d0456757-a0e3-42a7-900f-422828fe9836\" (UID: \"d0456757-a0e3-42a7-900f-422828fe9836\") " Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.407254 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-config-data\") pod \"d0456757-a0e3-42a7-900f-422828fe9836\" (UID: \"d0456757-a0e3-42a7-900f-422828fe9836\") " Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.407322 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gth75\" (UniqueName: \"kubernetes.io/projected/d0456757-a0e3-42a7-900f-422828fe9836-kube-api-access-gth75\") pod \"d0456757-a0e3-42a7-900f-422828fe9836\" (UID: \"d0456757-a0e3-42a7-900f-422828fe9836\") " Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.419390 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-scripts" (OuterVolumeSpecName: "scripts") pod "d0456757-a0e3-42a7-900f-422828fe9836" (UID: "d0456757-a0e3-42a7-900f-422828fe9836"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.419555 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0456757-a0e3-42a7-900f-422828fe9836-kube-api-access-gth75" (OuterVolumeSpecName: "kube-api-access-gth75") pod "d0456757-a0e3-42a7-900f-422828fe9836" (UID: "d0456757-a0e3-42a7-900f-422828fe9836"). InnerVolumeSpecName "kube-api-access-gth75". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.454459 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-config-data" (OuterVolumeSpecName: "config-data") pod "d0456757-a0e3-42a7-900f-422828fe9836" (UID: "d0456757-a0e3-42a7-900f-422828fe9836"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.454584 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d0456757-a0e3-42a7-900f-422828fe9836" (UID: "d0456757-a0e3-42a7-900f-422828fe9836"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.518672 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.518732 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.518748 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0456757-a0e3-42a7-900f-422828fe9836-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.518762 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gth75\" (UniqueName: \"kubernetes.io/projected/d0456757-a0e3-42a7-900f-422828fe9836-kube-api-access-gth75\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.629902 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"944a2fc1-d799-4137-8b34-fd0d76278dee","Type":"ContainerStarted","Data":"42ee8dc0645f19e5c19f356c136cc574556fb3b1c80a709f22bbffe30c745b33"} Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.634253 4877 generic.go:334] "Generic (PLEG): container finished" podID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerID="f30427682927d6697d795ec003cd86c1773d146d41690818787e684314926fef" exitCode=0 Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.634283 4877 generic.go:334] "Generic (PLEG): container finished" podID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerID="260d70b88dfb5bc985a90b895c2fad459081454e9c8300e287c7fb1511a4c6bc" exitCode=0 Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.634291 4877 generic.go:334] "Generic (PLEG): container finished" podID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerID="c253ce01bf3aa86a94d013e3a7fac2725ea4e3bd96ed54d591dc9dde378f6012" exitCode=0 Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.634324 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"04e4e30f-0823-4599-b3c3-b71a8630547f","Type":"ContainerDied","Data":"f30427682927d6697d795ec003cd86c1773d146d41690818787e684314926fef"} Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.634347 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"04e4e30f-0823-4599-b3c3-b71a8630547f","Type":"ContainerDied","Data":"260d70b88dfb5bc985a90b895c2fad459081454e9c8300e287c7fb1511a4c6bc"} Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.634355 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"04e4e30f-0823-4599-b3c3-b71a8630547f","Type":"ContainerDied","Data":"c253ce01bf3aa86a94d013e3a7fac2725ea4e3bd96ed54d591dc9dde378f6012"} Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.636542 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-j4pm4" event={"ID":"d0456757-a0e3-42a7-900f-422828fe9836","Type":"ContainerDied","Data":"9777ec0abacef928cf378ec1fc8f334ecad0a93e3fb0376f086035fed6788e6e"} Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.636566 4877 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="9777ec0abacef928cf378ec1fc8f334ecad0a93e3fb0376f086035fed6788e6e" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.636614 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-j4pm4" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.795633 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 28 17:03:38 crc kubenswrapper[4877]: E0128 17:03:38.796524 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0456757-a0e3-42a7-900f-422828fe9836" containerName="nova-cell0-conductor-db-sync" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.796546 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0456757-a0e3-42a7-900f-422828fe9836" containerName="nova-cell0-conductor-db-sync" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.796932 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0456757-a0e3-42a7-900f-422828fe9836" containerName="nova-cell0-conductor-db-sync" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.800946 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.824214 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83\") " pod="openstack/nova-cell0-conductor-0" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.824298 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83\") " pod="openstack/nova-cell0-conductor-0" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.824437 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbfxc\" (UniqueName: \"kubernetes.io/projected/ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83-kube-api-access-bbfxc\") pod \"nova-cell0-conductor-0\" (UID: \"ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83\") " pod="openstack/nova-cell0-conductor-0" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.824614 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-ktvl9" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.824621 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.856610 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.928422 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83\") " pod="openstack/nova-cell0-conductor-0" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.932064 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbfxc\" (UniqueName: 
\"kubernetes.io/projected/ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83-kube-api-access-bbfxc\") pod \"nova-cell0-conductor-0\" (UID: \"ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83\") " pod="openstack/nova-cell0-conductor-0" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.932232 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83\") " pod="openstack/nova-cell0-conductor-0" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.934418 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83\") " pod="openstack/nova-cell0-conductor-0" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.936651 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83\") " pod="openstack/nova-cell0-conductor-0" Jan 28 17:03:38 crc kubenswrapper[4877]: I0128 17:03:38.963962 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbfxc\" (UniqueName: \"kubernetes.io/projected/ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83-kube-api-access-bbfxc\") pod \"nova-cell0-conductor-0\" (UID: \"ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83\") " pod="openstack/nova-cell0-conductor-0" Jan 28 17:03:39 crc kubenswrapper[4877]: I0128 17:03:39.155930 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 28 17:03:39 crc kubenswrapper[4877]: I0128 17:03:39.636944 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 28 17:03:39 crc kubenswrapper[4877]: W0128 17:03:39.644278 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podffe6c6bd_b766_4c0d_96a2_b0fc75e78c83.slice/crio-e6da9fa17ec9cc2c6058c3f7eaddfccad53e41bb0163bb3c55e31a4b2128df30 WatchSource:0}: Error finding container e6da9fa17ec9cc2c6058c3f7eaddfccad53e41bb0163bb3c55e31a4b2128df30: Status 404 returned error can't find the container with id e6da9fa17ec9cc2c6058c3f7eaddfccad53e41bb0163bb3c55e31a4b2128df30 Jan 28 17:03:40 crc kubenswrapper[4877]: I0128 17:03:40.672382 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"944a2fc1-d799-4137-8b34-fd0d76278dee","Type":"ContainerStarted","Data":"c3b6881ee1402b6e8fdee4e7e3b517b7b16bd5a797df6e7ce895f80113fc872d"} Jan 28 17:03:40 crc kubenswrapper[4877]: I0128 17:03:40.673443 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 17:03:40 crc kubenswrapper[4877]: I0128 17:03:40.672547 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerName="proxy-httpd" containerID="cri-o://c3b6881ee1402b6e8fdee4e7e3b517b7b16bd5a797df6e7ce895f80113fc872d" gracePeriod=30 Jan 28 17:03:40 crc kubenswrapper[4877]: I0128 17:03:40.672465 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerName="ceilometer-central-agent" containerID="cri-o://b268825525e642fd2f91a075887ca7807113d3761003a53ec903d280e8b37b17" gracePeriod=30 Jan 28 17:03:40 crc kubenswrapper[4877]: I0128 17:03:40.672597 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerName="sg-core" containerID="cri-o://42ee8dc0645f19e5c19f356c136cc574556fb3b1c80a709f22bbffe30c745b33" gracePeriod=30 Jan 28 17:03:40 crc kubenswrapper[4877]: I0128 17:03:40.672625 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerName="ceilometer-notification-agent" containerID="cri-o://405183ef079e6231df0ab68ae0a0cc9f0c53714eebade06a2add92e3f958da76" gracePeriod=30 Jan 28 17:03:40 crc kubenswrapper[4877]: I0128 17:03:40.675931 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83","Type":"ContainerStarted","Data":"808cb886506658585a57027852d27d89a93694c848090df52ac795e47234242c"} Jan 28 17:03:40 crc kubenswrapper[4877]: I0128 17:03:40.675970 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"ffe6c6bd-b766-4c0d-96a2-b0fc75e78c83","Type":"ContainerStarted","Data":"e6da9fa17ec9cc2c6058c3f7eaddfccad53e41bb0163bb3c55e31a4b2128df30"} Jan 28 17:03:40 crc kubenswrapper[4877]: I0128 17:03:40.676793 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 28 17:03:40 crc kubenswrapper[4877]: I0128 17:03:40.731432 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/ceilometer-0" podStartSLOduration=2.6173553910000003 podStartE2EDuration="7.731410917s" podCreationTimestamp="2026-01-28 17:03:33 +0000 UTC" firstStartedPulling="2026-01-28 17:03:34.845073487 +0000 UTC m=+1718.403400375" lastFinishedPulling="2026-01-28 17:03:39.959129013 +0000 UTC m=+1723.517455901" observedRunningTime="2026-01-28 17:03:40.707653367 +0000 UTC m=+1724.265980255" watchObservedRunningTime="2026-01-28 17:03:40.731410917 +0000 UTC m=+1724.289737805" Jan 28 17:03:40 crc kubenswrapper[4877]: I0128 17:03:40.737606 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.7375933249999997 podStartE2EDuration="2.737593325s" podCreationTimestamp="2026-01-28 17:03:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:03:40.730799851 +0000 UTC m=+1724.289126739" watchObservedRunningTime="2026-01-28 17:03:40.737593325 +0000 UTC m=+1724.295920213" Jan 28 17:03:41 crc kubenswrapper[4877]: I0128 17:03:41.688431 4877 generic.go:334] "Generic (PLEG): container finished" podID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerID="c3b6881ee1402b6e8fdee4e7e3b517b7b16bd5a797df6e7ce895f80113fc872d" exitCode=0 Jan 28 17:03:41 crc kubenswrapper[4877]: I0128 17:03:41.689312 4877 generic.go:334] "Generic (PLEG): container finished" podID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerID="42ee8dc0645f19e5c19f356c136cc574556fb3b1c80a709f22bbffe30c745b33" exitCode=2 Jan 28 17:03:41 crc kubenswrapper[4877]: I0128 17:03:41.689406 4877 generic.go:334] "Generic (PLEG): container finished" podID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerID="405183ef079e6231df0ab68ae0a0cc9f0c53714eebade06a2add92e3f958da76" exitCode=0 Jan 28 17:03:41 crc kubenswrapper[4877]: I0128 17:03:41.688612 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"944a2fc1-d799-4137-8b34-fd0d76278dee","Type":"ContainerDied","Data":"c3b6881ee1402b6e8fdee4e7e3b517b7b16bd5a797df6e7ce895f80113fc872d"} Jan 28 17:03:41 crc kubenswrapper[4877]: I0128 17:03:41.689556 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"944a2fc1-d799-4137-8b34-fd0d76278dee","Type":"ContainerDied","Data":"42ee8dc0645f19e5c19f356c136cc574556fb3b1c80a709f22bbffe30c745b33"} Jan 28 17:03:41 crc kubenswrapper[4877]: I0128 17:03:41.689582 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"944a2fc1-d799-4137-8b34-fd0d76278dee","Type":"ContainerDied","Data":"405183ef079e6231df0ab68ae0a0cc9f0c53714eebade06a2add92e3f958da76"} Jan 28 17:03:42 crc kubenswrapper[4877]: I0128 17:03:42.704967 4877 generic.go:334] "Generic (PLEG): container finished" podID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerID="b268825525e642fd2f91a075887ca7807113d3761003a53ec903d280e8b37b17" exitCode=0 Jan 28 17:03:42 crc kubenswrapper[4877]: I0128 17:03:42.705047 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"944a2fc1-d799-4137-8b34-fd0d76278dee","Type":"ContainerDied","Data":"b268825525e642fd2f91a075887ca7807113d3761003a53ec903d280e8b37b17"} Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.054579 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.160339 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/944a2fc1-d799-4137-8b34-fd0d76278dee-run-httpd\") pod \"944a2fc1-d799-4137-8b34-fd0d76278dee\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.160417 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-combined-ca-bundle\") pod \"944a2fc1-d799-4137-8b34-fd0d76278dee\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.160579 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7sm9\" (UniqueName: \"kubernetes.io/projected/944a2fc1-d799-4137-8b34-fd0d76278dee-kube-api-access-c7sm9\") pod \"944a2fc1-d799-4137-8b34-fd0d76278dee\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.160608 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/944a2fc1-d799-4137-8b34-fd0d76278dee-log-httpd\") pod \"944a2fc1-d799-4137-8b34-fd0d76278dee\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.160636 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-config-data\") pod \"944a2fc1-d799-4137-8b34-fd0d76278dee\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.160684 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-scripts\") pod \"944a2fc1-d799-4137-8b34-fd0d76278dee\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.160723 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-sg-core-conf-yaml\") pod \"944a2fc1-d799-4137-8b34-fd0d76278dee\" (UID: \"944a2fc1-d799-4137-8b34-fd0d76278dee\") " Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.160951 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/944a2fc1-d799-4137-8b34-fd0d76278dee-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "944a2fc1-d799-4137-8b34-fd0d76278dee" (UID: "944a2fc1-d799-4137-8b34-fd0d76278dee"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.161359 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/944a2fc1-d799-4137-8b34-fd0d76278dee-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "944a2fc1-d799-4137-8b34-fd0d76278dee" (UID: "944a2fc1-d799-4137-8b34-fd0d76278dee"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.161968 4877 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/944a2fc1-d799-4137-8b34-fd0d76278dee-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.161997 4877 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/944a2fc1-d799-4137-8b34-fd0d76278dee-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.166044 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/944a2fc1-d799-4137-8b34-fd0d76278dee-kube-api-access-c7sm9" (OuterVolumeSpecName: "kube-api-access-c7sm9") pod "944a2fc1-d799-4137-8b34-fd0d76278dee" (UID: "944a2fc1-d799-4137-8b34-fd0d76278dee"). InnerVolumeSpecName "kube-api-access-c7sm9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.166380 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-scripts" (OuterVolumeSpecName: "scripts") pod "944a2fc1-d799-4137-8b34-fd0d76278dee" (UID: "944a2fc1-d799-4137-8b34-fd0d76278dee"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.193136 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "944a2fc1-d799-4137-8b34-fd0d76278dee" (UID: "944a2fc1-d799-4137-8b34-fd0d76278dee"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.250904 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "944a2fc1-d799-4137-8b34-fd0d76278dee" (UID: "944a2fc1-d799-4137-8b34-fd0d76278dee"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.264660 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.264697 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7sm9\" (UniqueName: \"kubernetes.io/projected/944a2fc1-d799-4137-8b34-fd0d76278dee-kube-api-access-c7sm9\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.264710 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.264720 4877 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.280770 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-config-data" (OuterVolumeSpecName: "config-data") pod "944a2fc1-d799-4137-8b34-fd0d76278dee" (UID: "944a2fc1-d799-4137-8b34-fd0d76278dee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.368515 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/944a2fc1-d799-4137-8b34-fd0d76278dee-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.719723 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"944a2fc1-d799-4137-8b34-fd0d76278dee","Type":"ContainerDied","Data":"52a5321e9afac6fcad92118b0802a832fb65528964aec16a89d11cf112d7ce10"} Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.719778 4877 scope.go:117] "RemoveContainer" containerID="c3b6881ee1402b6e8fdee4e7e3b517b7b16bd5a797df6e7ce895f80113fc872d" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.719787 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.750778 4877 scope.go:117] "RemoveContainer" containerID="42ee8dc0645f19e5c19f356c136cc574556fb3b1c80a709f22bbffe30c745b33" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.755745 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.775580 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.784149 4877 scope.go:117] "RemoveContainer" containerID="405183ef079e6231df0ab68ae0a0cc9f0c53714eebade06a2add92e3f958da76" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.797455 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:03:43 crc kubenswrapper[4877]: E0128 17:03:43.799707 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerName="ceilometer-central-agent" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.799741 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerName="ceilometer-central-agent" Jan 28 17:03:43 crc kubenswrapper[4877]: E0128 17:03:43.799780 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerName="ceilometer-notification-agent" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.799790 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerName="ceilometer-notification-agent" Jan 28 17:03:43 crc kubenswrapper[4877]: E0128 17:03:43.799818 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerName="sg-core" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.799826 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerName="sg-core" Jan 28 17:03:43 crc kubenswrapper[4877]: E0128 17:03:43.799836 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerName="proxy-httpd" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.799843 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerName="proxy-httpd" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.813374 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerName="ceilometer-notification-agent" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.813423 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerName="proxy-httpd" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.813451 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerName="ceilometer-central-agent" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.813504 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" containerName="sg-core" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.825294 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.825539 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.825536 4877 scope.go:117] "RemoveContainer" containerID="b268825525e642fd2f91a075887ca7807113d3761003a53ec903d280e8b37b17" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.829630 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.829746 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.881061 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-scripts\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.881107 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5fxt\" (UniqueName: \"kubernetes.io/projected/f6df48e5-8631-4576-9cff-5795a458241e-kube-api-access-g5fxt\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.881159 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.881177 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6df48e5-8631-4576-9cff-5795a458241e-log-httpd\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.881265 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6df48e5-8631-4576-9cff-5795a458241e-run-httpd\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.881309 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-config-data\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.881428 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.983214 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6df48e5-8631-4576-9cff-5795a458241e-log-httpd\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc 
kubenswrapper[4877]: I0128 17:03:43.983295 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6df48e5-8631-4576-9cff-5795a458241e-run-httpd\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.983343 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-config-data\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.983417 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.983544 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-scripts\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.983568 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5fxt\" (UniqueName: \"kubernetes.io/projected/f6df48e5-8631-4576-9cff-5795a458241e-kube-api-access-g5fxt\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.983614 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.985045 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6df48e5-8631-4576-9cff-5795a458241e-log-httpd\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.985160 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6df48e5-8631-4576-9cff-5795a458241e-run-httpd\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.989409 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-scripts\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.990119 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-config-data\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.990643 4877 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:43 crc kubenswrapper[4877]: I0128 17:03:43.990676 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.003917 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5fxt\" (UniqueName: \"kubernetes.io/projected/f6df48e5-8631-4576-9cff-5795a458241e-kube-api-access-g5fxt\") pod \"ceilometer-0\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " pod="openstack/ceilometer-0" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.162498 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.195643 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.330824 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:03:44 crc kubenswrapper[4877]: E0128 17:03:44.331535 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.688234 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.731636 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-lpr6d"] Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.733567 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-lpr6d" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.743127 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-lpr6d"] Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.749376 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.749632 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.756971 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6df48e5-8631-4576-9cff-5795a458241e","Type":"ContainerStarted","Data":"d1ffcf341fe062aeadbf6f0d6b6322aa663b564bb05682fd0110736523a4f082"} Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.803328 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-lpr6d\" (UID: \"e5b7f132-af69-43a8-8771-c039c8039a35\") " pod="openstack/nova-cell0-cell-mapping-lpr6d" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.803617 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-config-data\") pod \"nova-cell0-cell-mapping-lpr6d\" (UID: \"e5b7f132-af69-43a8-8771-c039c8039a35\") " pod="openstack/nova-cell0-cell-mapping-lpr6d" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.803775 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnhrx\" (UniqueName: \"kubernetes.io/projected/e5b7f132-af69-43a8-8771-c039c8039a35-kube-api-access-hnhrx\") pod \"nova-cell0-cell-mapping-lpr6d\" (UID: \"e5b7f132-af69-43a8-8771-c039c8039a35\") " pod="openstack/nova-cell0-cell-mapping-lpr6d" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.803867 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-scripts\") pod \"nova-cell0-cell-mapping-lpr6d\" (UID: \"e5b7f132-af69-43a8-8771-c039c8039a35\") " pod="openstack/nova-cell0-cell-mapping-lpr6d" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.898524 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.900152 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.904767 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.906407 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-lpr6d\" (UID: \"e5b7f132-af69-43a8-8771-c039c8039a35\") " pod="openstack/nova-cell0-cell-mapping-lpr6d" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.906687 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-config-data\") pod \"nova-cell0-cell-mapping-lpr6d\" (UID: \"e5b7f132-af69-43a8-8771-c039c8039a35\") " pod="openstack/nova-cell0-cell-mapping-lpr6d" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.906742 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnhrx\" (UniqueName: \"kubernetes.io/projected/e5b7f132-af69-43a8-8771-c039c8039a35-kube-api-access-hnhrx\") pod \"nova-cell0-cell-mapping-lpr6d\" (UID: \"e5b7f132-af69-43a8-8771-c039c8039a35\") " pod="openstack/nova-cell0-cell-mapping-lpr6d" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.906779 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-scripts\") pod \"nova-cell0-cell-mapping-lpr6d\" (UID: \"e5b7f132-af69-43a8-8771-c039c8039a35\") " pod="openstack/nova-cell0-cell-mapping-lpr6d" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.918421 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-lpr6d\" (UID: \"e5b7f132-af69-43a8-8771-c039c8039a35\") " pod="openstack/nova-cell0-cell-mapping-lpr6d" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.927290 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.935832 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-config-data\") pod \"nova-cell0-cell-mapping-lpr6d\" (UID: \"e5b7f132-af69-43a8-8771-c039c8039a35\") " pod="openstack/nova-cell0-cell-mapping-lpr6d" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.937576 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-scripts\") pod \"nova-cell0-cell-mapping-lpr6d\" (UID: \"e5b7f132-af69-43a8-8771-c039c8039a35\") " pod="openstack/nova-cell0-cell-mapping-lpr6d" Jan 28 17:03:44 crc kubenswrapper[4877]: I0128 17:03:44.957596 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnhrx\" (UniqueName: \"kubernetes.io/projected/e5b7f132-af69-43a8-8771-c039c8039a35-kube-api-access-hnhrx\") pod \"nova-cell0-cell-mapping-lpr6d\" (UID: \"e5b7f132-af69-43a8-8771-c039c8039a35\") " pod="openstack/nova-cell0-cell-mapping-lpr6d" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.010445 4877 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-config-data\") pod \"nova-scheduler-0\" (UID: \"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2\") " pod="openstack/nova-scheduler-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.010603 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2\") " pod="openstack/nova-scheduler-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.010687 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rqvd\" (UniqueName: \"kubernetes.io/projected/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-kube-api-access-6rqvd\") pod \"nova-scheduler-0\" (UID: \"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2\") " pod="openstack/nova-scheduler-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.064682 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.076135 4877 scope.go:117] "RemoveContainer" containerID="04e0c1a3a7bb75708118420d020e8a25d27589cc1f9c7f1ae8beb17f007e9e79" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.078968 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.089408 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.099662 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.116107 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2\") " pod="openstack/nova-scheduler-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.116216 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rqvd\" (UniqueName: \"kubernetes.io/projected/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-kube-api-access-6rqvd\") pod \"nova-scheduler-0\" (UID: \"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2\") " pod="openstack/nova-scheduler-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.116290 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb1cf2c3-547c-4c1b-9977-b259e8db132a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\") " pod="openstack/nova-metadata-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.116415 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb1cf2c3-547c-4c1b-9977-b259e8db132a-logs\") pod \"nova-metadata-0\" (UID: \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\") " pod="openstack/nova-metadata-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.116441 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-s5qfk\" (UniqueName: \"kubernetes.io/projected/bb1cf2c3-547c-4c1b-9977-b259e8db132a-kube-api-access-s5qfk\") pod \"nova-metadata-0\" (UID: \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\") " pod="openstack/nova-metadata-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.116508 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb1cf2c3-547c-4c1b-9977-b259e8db132a-config-data\") pod \"nova-metadata-0\" (UID: \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\") " pod="openstack/nova-metadata-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.116565 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-config-data\") pod \"nova-scheduler-0\" (UID: \"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2\") " pod="openstack/nova-scheduler-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.120587 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-lpr6d" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.143078 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.143716 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2\") " pod="openstack/nova-scheduler-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.144448 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-config-data\") pod \"nova-scheduler-0\" (UID: \"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2\") " pod="openstack/nova-scheduler-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.144633 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.161529 4877 scope.go:117] "RemoveContainer" containerID="3b80ff6004d25625cee197d1a4284bb7158695ccdc578091065cb3355d16fb0d" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.161824 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.182553 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.207164 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rqvd\" (UniqueName: \"kubernetes.io/projected/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-kube-api-access-6rqvd\") pod \"nova-scheduler-0\" (UID: \"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2\") " pod="openstack/nova-scheduler-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.220283 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb1cf2c3-547c-4c1b-9977-b259e8db132a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\") " pod="openstack/nova-metadata-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.220433 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb1cf2c3-547c-4c1b-9977-b259e8db132a-logs\") pod \"nova-metadata-0\" (UID: \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\") " pod="openstack/nova-metadata-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.220458 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5qfk\" (UniqueName: \"kubernetes.io/projected/bb1cf2c3-547c-4c1b-9977-b259e8db132a-kube-api-access-s5qfk\") pod \"nova-metadata-0\" (UID: \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\") " pod="openstack/nova-metadata-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.220560 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb1cf2c3-547c-4c1b-9977-b259e8db132a-config-data\") pod \"nova-metadata-0\" (UID: \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\") " pod="openstack/nova-metadata-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.223440 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb1cf2c3-547c-4c1b-9977-b259e8db132a-logs\") pod \"nova-metadata-0\" (UID: \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\") " pod="openstack/nova-metadata-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.231191 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb1cf2c3-547c-4c1b-9977-b259e8db132a-config-data\") pod \"nova-metadata-0\" (UID: \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\") " pod="openstack/nova-metadata-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.235150 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb1cf2c3-547c-4c1b-9977-b259e8db132a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\") " pod="openstack/nova-metadata-0" Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.272955 4877 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-s5qfk\" (UniqueName: \"kubernetes.io/projected/bb1cf2c3-547c-4c1b-9977-b259e8db132a-kube-api-access-s5qfk\") pod \"nova-metadata-0\" (UID: \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\") " pod="openstack/nova-metadata-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.335253 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvz5s\" (UniqueName: \"kubernetes.io/projected/e2464c30-bc7c-4481-8ccd-ab866e7fc678-kube-api-access-mvz5s\") pod \"nova-cell1-novncproxy-0\" (UID: \"e2464c30-bc7c-4481-8ccd-ab866e7fc678\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.335698 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2464c30-bc7c-4481-8ccd-ab866e7fc678-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e2464c30-bc7c-4481-8ccd-ab866e7fc678\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.335877 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2464c30-bc7c-4481-8ccd-ab866e7fc678-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e2464c30-bc7c-4481-8ccd-ab866e7fc678\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.353978 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.443237 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2464c30-bc7c-4481-8ccd-ab866e7fc678-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e2464c30-bc7c-4481-8ccd-ab866e7fc678\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.443426 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2464c30-bc7c-4481-8ccd-ab866e7fc678-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e2464c30-bc7c-4481-8ccd-ab866e7fc678\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.443562 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvz5s\" (UniqueName: \"kubernetes.io/projected/e2464c30-bc7c-4481-8ccd-ab866e7fc678-kube-api-access-mvz5s\") pod \"nova-cell1-novncproxy-0\" (UID: \"e2464c30-bc7c-4481-8ccd-ab866e7fc678\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.467087 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2464c30-bc7c-4481-8ccd-ab866e7fc678-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e2464c30-bc7c-4481-8ccd-ab866e7fc678\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.467679 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2464c30-bc7c-4481-8ccd-ab866e7fc678-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e2464c30-bc7c-4481-8ccd-ab866e7fc678\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.524212 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="944a2fc1-d799-4137-8b34-fd0d76278dee" path="/var/lib/kubelet/pods/944a2fc1-d799-4137-8b34-fd0d76278dee/volumes"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.525358 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.528879 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.536932 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.542195 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvz5s\" (UniqueName: \"kubernetes.io/projected/e2464c30-bc7c-4481-8ccd-ab866e7fc678-kube-api-access-mvz5s\") pod \"nova-cell1-novncproxy-0\" (UID: \"e2464c30-bc7c-4481-8ccd-ab866e7fc678\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.577488 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.649749 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.675432 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.681116 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcx8w\" (UniqueName: \"kubernetes.io/projected/77ef8d5f-555c-4e83-ad96-9d63b95c3684-kube-api-access-fcx8w\") pod \"nova-api-0\" (UID: \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\") " pod="openstack/nova-api-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.681223 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77ef8d5f-555c-4e83-ad96-9d63b95c3684-logs\") pod \"nova-api-0\" (UID: \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\") " pod="openstack/nova-api-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.681314 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ef8d5f-555c-4e83-ad96-9d63b95c3684-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\") " pod="openstack/nova-api-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.681434 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77ef8d5f-555c-4e83-ad96-9d63b95c3684-config-data\") pod \"nova-api-0\" (UID: \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\") " pod="openstack/nova-api-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.782239 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-wgdw5"]
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.824571 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77ef8d5f-555c-4e83-ad96-9d63b95c3684-logs\") pod \"nova-api-0\" (UID: \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\") " pod="openstack/nova-api-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.824756 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ef8d5f-555c-4e83-ad96-9d63b95c3684-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\") " pod="openstack/nova-api-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.824963 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77ef8d5f-555c-4e83-ad96-9d63b95c3684-config-data\") pod \"nova-api-0\" (UID: \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\") " pod="openstack/nova-api-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.825220 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcx8w\" (UniqueName: \"kubernetes.io/projected/77ef8d5f-555c-4e83-ad96-9d63b95c3684-kube-api-access-fcx8w\") pod \"nova-api-0\" (UID: \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\") " pod="openstack/nova-api-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.836711 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77ef8d5f-555c-4e83-ad96-9d63b95c3684-logs\") pod \"nova-api-0\" (UID: \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\") " pod="openstack/nova-api-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.843295 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ef8d5f-555c-4e83-ad96-9d63b95c3684-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\") " pod="openstack/nova-api-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.851326 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77ef8d5f-555c-4e83-ad96-9d63b95c3684-config-data\") pod \"nova-api-0\" (UID: \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\") " pod="openstack/nova-api-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.855130 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.882735 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcx8w\" (UniqueName: \"kubernetes.io/projected/77ef8d5f-555c-4e83-ad96-9d63b95c3684-kube-api-access-fcx8w\") pod \"nova-api-0\" (UID: \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\") " pod="openstack/nova-api-0"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.915556 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-wgdw5"]
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.927215 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wk7d9\" (UniqueName: \"kubernetes.io/projected/d61dd139-ed88-4a56-974d-3860f196e55d-kube-api-access-wk7d9\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.927290 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-config\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.927331 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.927357 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.927451 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:45 crc kubenswrapper[4877]: I0128 17:03:45.927584 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.030145 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.030214 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.030357 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.030442 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.030612 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wk7d9\" (UniqueName: \"kubernetes.io/projected/d61dd139-ed88-4a56-974d-3860f196e55d-kube-api-access-wk7d9\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.030667 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-config\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.031515 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-config\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.031524 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.032155 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.032253 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.032965 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.052249 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.075546 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wk7d9\" (UniqueName: \"kubernetes.io/projected/d61dd139-ed88-4a56-974d-3860f196e55d-kube-api-access-wk7d9\") pod \"dnsmasq-dns-5fbc4d444f-wgdw5\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.125739 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.275055 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-lpr6d"]
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.691256 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.738379 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.881992 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-f5bgl"]
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.884346 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-f5bgl"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.889907 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.890101 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.924519 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-f5bgl"]
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.925456 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bb1cf2c3-547c-4c1b-9977-b259e8db132a","Type":"ContainerStarted","Data":"2d3a47e1b9df7658a68d0da86e19eb8e63782f3b59ab3fce7704711c3fa5020c"}
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.943207 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2","Type":"ContainerStarted","Data":"aa5c1ec95591ff8bb5b017cb55e428044bd0b275db37e7c1314e16eba90d2306"}
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.944518 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6df48e5-8631-4576-9cff-5795a458241e","Type":"ContainerStarted","Data":"2eb3ca4ed1203afb503d2152f95bef8dd7210bc0f37812a80f2b8e348d35caaf"}
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.945412 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-lpr6d" event={"ID":"e5b7f132-af69-43a8-8771-c039c8039a35","Type":"ContainerStarted","Data":"2486e9d8170a0b6dafccce734d1b48cac7c3dac1483266b420dcd9f01fb176b8"}
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.993881 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-scripts\") pod \"nova-cell1-conductor-db-sync-f5bgl\" (UID: \"e4954890-1a90-4904-a7d8-f286d1b56745\") " pod="openstack/nova-cell1-conductor-db-sync-f5bgl"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.993974 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghbnh\" (UniqueName: \"kubernetes.io/projected/e4954890-1a90-4904-a7d8-f286d1b56745-kube-api-access-ghbnh\") pod \"nova-cell1-conductor-db-sync-f5bgl\" (UID: \"e4954890-1a90-4904-a7d8-f286d1b56745\") " pod="openstack/nova-cell1-conductor-db-sync-f5bgl"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.994090 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-config-data\") pod \"nova-cell1-conductor-db-sync-f5bgl\" (UID: \"e4954890-1a90-4904-a7d8-f286d1b56745\") " pod="openstack/nova-cell1-conductor-db-sync-f5bgl"
Jan 28 17:03:46 crc kubenswrapper[4877]: I0128 17:03:46.994131 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-f5bgl\" (UID: \"e4954890-1a90-4904-a7d8-f286d1b56745\") " pod="openstack/nova-cell1-conductor-db-sync-f5bgl"
Jan 28 17:03:47 crc kubenswrapper[4877]: I0128 17:03:47.112854 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-scripts\") pod \"nova-cell1-conductor-db-sync-f5bgl\" (UID: \"e4954890-1a90-4904-a7d8-f286d1b56745\") " pod="openstack/nova-cell1-conductor-db-sync-f5bgl"
Jan 28 17:03:47 crc kubenswrapper[4877]: I0128 17:03:47.113035 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghbnh\" (UniqueName: \"kubernetes.io/projected/e4954890-1a90-4904-a7d8-f286d1b56745-kube-api-access-ghbnh\") pod \"nova-cell1-conductor-db-sync-f5bgl\" (UID: \"e4954890-1a90-4904-a7d8-f286d1b56745\") " pod="openstack/nova-cell1-conductor-db-sync-f5bgl"
Jan 28 17:03:47 crc kubenswrapper[4877]: I0128 17:03:47.113264 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-config-data\") pod \"nova-cell1-conductor-db-sync-f5bgl\" (UID: \"e4954890-1a90-4904-a7d8-f286d1b56745\") " pod="openstack/nova-cell1-conductor-db-sync-f5bgl"
Jan 28 17:03:47 crc kubenswrapper[4877]: I0128 17:03:47.113361 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-f5bgl\" (UID: \"e4954890-1a90-4904-a7d8-f286d1b56745\") " pod="openstack/nova-cell1-conductor-db-sync-f5bgl"
Jan 28 17:03:47 crc kubenswrapper[4877]: I0128 17:03:47.121746 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 28 17:03:47 crc kubenswrapper[4877]: I0128 17:03:47.123069 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-config-data\") pod \"nova-cell1-conductor-db-sync-f5bgl\" (UID: \"e4954890-1a90-4904-a7d8-f286d1b56745\") " pod="openstack/nova-cell1-conductor-db-sync-f5bgl"
Jan 28 17:03:47 crc kubenswrapper[4877]: I0128 17:03:47.124925 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-scripts\") pod \"nova-cell1-conductor-db-sync-f5bgl\" (UID: \"e4954890-1a90-4904-a7d8-f286d1b56745\") " pod="openstack/nova-cell1-conductor-db-sync-f5bgl"
Jan 28 17:03:47 crc kubenswrapper[4877]: I0128 17:03:47.130725 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-f5bgl\" (UID: \"e4954890-1a90-4904-a7d8-f286d1b56745\") " pod="openstack/nova-cell1-conductor-db-sync-f5bgl"
Jan 28 17:03:47 crc kubenswrapper[4877]: I0128 17:03:47.135330 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghbnh\" (UniqueName: \"kubernetes.io/projected/e4954890-1a90-4904-a7d8-f286d1b56745-kube-api-access-ghbnh\") pod \"nova-cell1-conductor-db-sync-f5bgl\" (UID: \"e4954890-1a90-4904-a7d8-f286d1b56745\") " pod="openstack/nova-cell1-conductor-db-sync-f5bgl"
Jan 28 17:03:47 crc kubenswrapper[4877]: I0128 17:03:47.295867 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-f5bgl"
Jan 28 17:03:47 crc kubenswrapper[4877]: I0128 17:03:47.602685 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-wgdw5"]
Jan 28 17:03:47 crc kubenswrapper[4877]: I0128 17:03:47.633592 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 28 17:03:48 crc kubenswrapper[4877]: I0128 17:03:48.168062 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-lpr6d" event={"ID":"e5b7f132-af69-43a8-8771-c039c8039a35","Type":"ContainerStarted","Data":"97f20036790abdca2e5eceae7ba75fd06efad3ef727f8220c097ad21982b0b86"}
Jan 28 17:03:48 crc kubenswrapper[4877]: I0128 17:03:48.194724 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6df48e5-8631-4576-9cff-5795a458241e","Type":"ContainerStarted","Data":"bb77ca55177d07dda480721f36c0b4753971c59a418f2bc714c11113ac3d2088"}
Jan 28 17:03:48 crc kubenswrapper[4877]: I0128 17:03:48.216588 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5" event={"ID":"d61dd139-ed88-4a56-974d-3860f196e55d","Type":"ContainerStarted","Data":"3164420824da21792c83b06a89da2e238292a4858721990da8f60e0db79537bd"}
Jan 28 17:03:48 crc kubenswrapper[4877]: I0128 17:03:48.235119 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e2464c30-bc7c-4481-8ccd-ab866e7fc678","Type":"ContainerStarted","Data":"09cf5f00613e7ec89eedc409c1d90a49fcdc5ad4926736de80b600ad9a4d016b"}
Jan 28 17:03:48 crc kubenswrapper[4877]: I0128 17:03:48.259843 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"77ef8d5f-555c-4e83-ad96-9d63b95c3684","Type":"ContainerStarted","Data":"b4d642541c8fe9bde3ec2db698656a6b187be6592896ba4cfff0d3c3b7b29e30"}
Jan 28 17:03:48 crc kubenswrapper[4877]: I0128 17:03:48.264535 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-lpr6d" podStartSLOduration=4.264507072 podStartE2EDuration="4.264507072s" podCreationTimestamp="2026-01-28 17:03:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:03:48.235363317 +0000 UTC m=+1731.793690205" watchObservedRunningTime="2026-01-28 17:03:48.264507072 +0000 UTC m=+1731.822833960"
Jan 28 17:03:48 crc kubenswrapper[4877]: I0128 17:03:48.362238 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-f5bgl"]
Jan 28 17:03:49 crc kubenswrapper[4877]: I0128 17:03:49.309511 4877 generic.go:334] "Generic (PLEG): container finished" podID="d61dd139-ed88-4a56-974d-3860f196e55d" containerID="b6985e3177c8cadcf98b329537029623a7da0f3b1095063d99babfc3c43ea51d" exitCode=0
Jan 28 17:03:49 crc kubenswrapper[4877]: I0128 17:03:49.309892 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5" event={"ID":"d61dd139-ed88-4a56-974d-3860f196e55d","Type":"ContainerDied","Data":"b6985e3177c8cadcf98b329537029623a7da0f3b1095063d99babfc3c43ea51d"}
Jan 28 17:03:49 crc kubenswrapper[4877]: I0128 17:03:49.333619 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-f5bgl" event={"ID":"e4954890-1a90-4904-a7d8-f286d1b56745","Type":"ContainerStarted","Data":"05cf205cb7271560cfdd0cac3ffd18218092b8634d3059295d86db5e3ec969aa"}
Jan 28 17:03:49 crc kubenswrapper[4877]: I0128 17:03:49.333676 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-f5bgl" event={"ID":"e4954890-1a90-4904-a7d8-f286d1b56745","Type":"ContainerStarted","Data":"b89afaf5b7c8b1a6fa9758a968da016004140cf553977a5476febe7b0fccfdc4"}
Jan 28 17:03:49 crc kubenswrapper[4877]: I0128 17:03:49.379215 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6df48e5-8631-4576-9cff-5795a458241e","Type":"ContainerStarted","Data":"4075c3b9fe27021d713ab9e343ce0ad04f76203e212bb5877eb13abd2e61fcaf"}
Jan 28 17:03:49 crc kubenswrapper[4877]: I0128 17:03:49.390395 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-f5bgl" podStartSLOduration=3.390374107 podStartE2EDuration="3.390374107s" podCreationTimestamp="2026-01-28 17:03:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:03:49.361777846 +0000 UTC m=+1732.920104734" watchObservedRunningTime="2026-01-28 17:03:49.390374107 +0000 UTC m=+1732.948700995"
Jan 28 17:03:49 crc kubenswrapper[4877]: I0128 17:03:49.675922 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 28 17:03:49 crc kubenswrapper[4877]: I0128 17:03:49.693371 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 17:03:53 crc kubenswrapper[4877]: I0128 17:03:53.477970 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5" event={"ID":"d61dd139-ed88-4a56-974d-3860f196e55d","Type":"ContainerStarted","Data":"193cec595451e9a6635aedd8fa161bb3ffd7422d6d6a47a5fa3a9933c06bdffc"}
Jan 28 17:03:53 crc kubenswrapper[4877]: I0128 17:03:53.478492 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5"
Jan 28 17:03:53 crc kubenswrapper[4877]: I0128 17:03:53.480431 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="e2464c30-bc7c-4481-8ccd-ab866e7fc678" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://cb5ee04a7fb97513308413171cb5b6ea44e962a06b2db140ce3c0c7d8a11e688" gracePeriod=30
Jan 28 17:03:53 crc kubenswrapper[4877]: I0128 17:03:53.480554 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e2464c30-bc7c-4481-8ccd-ab866e7fc678","Type":"ContainerStarted","Data":"cb5ee04a7fb97513308413171cb5b6ea44e962a06b2db140ce3c0c7d8a11e688"}
Jan 28 17:03:53 crc kubenswrapper[4877]: I0128 17:03:53.485266 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"77ef8d5f-555c-4e83-ad96-9d63b95c3684","Type":"ContainerStarted","Data":"da62c083db84e6a1f34a1aefd714ad86e2f97f928942dcc9b49e2d1a8c014c04"}
Jan 28 17:03:53 crc kubenswrapper[4877]: I0128 17:03:53.490022 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bb1cf2c3-547c-4c1b-9977-b259e8db132a","Type":"ContainerStarted","Data":"0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904"}
Jan 28 17:03:53 crc kubenswrapper[4877]: I0128 17:03:53.496058 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2","Type":"ContainerStarted","Data":"ace3c78a7c74d3425e4c0e417e939d1e77b6cabf70413aac222a8e888ca49cda"}
Jan 28 17:03:53 crc kubenswrapper[4877]: I0128 17:03:53.507717 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5" podStartSLOduration=8.507698908 podStartE2EDuration="8.507698908s" podCreationTimestamp="2026-01-28 17:03:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:03:53.505047257 +0000 UTC m=+1737.063374155" watchObservedRunningTime="2026-01-28 17:03:53.507698908 +0000 UTC m=+1737.066025796"
Jan 28 17:03:53 crc kubenswrapper[4877]: I0128 17:03:53.509742 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6df48e5-8631-4576-9cff-5795a458241e","Type":"ContainerStarted","Data":"83e1c715e67819568d645f006c3e8854fb63ad0bf020027308b83e8d0f642115"}
Jan 28 17:03:53 crc kubenswrapper[4877]: I0128 17:03:53.510022 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 28 17:03:53 crc kubenswrapper[4877]: I0128 17:03:53.537431 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.666601597 podStartE2EDuration="9.537405849s" podCreationTimestamp="2026-01-28 17:03:44 +0000 UTC" firstStartedPulling="2026-01-28 17:03:46.878786404 +0000 UTC m=+1730.437113292" lastFinishedPulling="2026-01-28 17:03:52.749590656 +0000 UTC m=+1736.307917544" observedRunningTime="2026-01-28 17:03:53.528063108 +0000 UTC m=+1737.086389996" watchObservedRunningTime="2026-01-28 17:03:53.537405849 +0000 UTC m=+1737.095732757"
Jan 28 17:03:53 crc kubenswrapper[4877]: I0128 17:03:53.572838 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.958814233 podStartE2EDuration="8.572813024s" podCreationTimestamp="2026-01-28 17:03:45 +0000 UTC" firstStartedPulling="2026-01-28 17:03:47.151236587 +0000 UTC m=+1730.709563475" lastFinishedPulling="2026-01-28 17:03:52.765235378 +0000 UTC m=+1736.323562266" observedRunningTime="2026-01-28 17:03:53.541841698 +0000 UTC m=+1737.100168606" watchObservedRunningTime="2026-01-28 17:03:53.572813024 +0000 UTC m=+1737.131139912"
Jan 28 17:03:53 crc kubenswrapper[4877]: I0128 17:03:53.632440 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.5627768250000003 podStartE2EDuration="10.63241136s" podCreationTimestamp="2026-01-28 17:03:43 +0000 UTC" firstStartedPulling="2026-01-28 17:03:44.719594029 +0000 UTC m=+1728.277920917" lastFinishedPulling="2026-01-28 17:03:52.789228564 +0000 UTC m=+1736.347555452" observedRunningTime="2026-01-28 17:03:53.573407629 +0000 UTC m=+1737.131734517" watchObservedRunningTime="2026-01-28 17:03:53.63241136 +0000 UTC m=+1737.190738248"
Jan 28 17:03:54 crc kubenswrapper[4877]: I0128 17:03:54.522081 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bb1cf2c3-547c-4c1b-9977-b259e8db132a","Type":"ContainerStarted","Data":"14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8"}
Jan 28 17:03:54 crc kubenswrapper[4877]: I0128 17:03:54.522205 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="bb1cf2c3-547c-4c1b-9977-b259e8db132a" containerName="nova-metadata-log" containerID="cri-o://0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904" gracePeriod=30
Jan 28 17:03:54 crc kubenswrapper[4877]: I0128 17:03:54.522634 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="bb1cf2c3-547c-4c1b-9977-b259e8db132a" containerName="nova-metadata-metadata" containerID="cri-o://14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8" gracePeriod=30
Jan 28 17:03:54 crc kubenswrapper[4877]: I0128 17:03:54.523828 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"77ef8d5f-555c-4e83-ad96-9d63b95c3684","Type":"ContainerStarted","Data":"d46653cc79e460941ecff85013bf4237dc6f1962f13b3ce9bacdb467b2f550da"}
Jan 28 17:03:54 crc kubenswrapper[4877]: I0128 17:03:54.552821 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=4.495606581 podStartE2EDuration="10.552802547s" podCreationTimestamp="2026-01-28 17:03:44 +0000 UTC" firstStartedPulling="2026-01-28 17:03:46.731092753 +0000 UTC m=+1730.289419641" lastFinishedPulling="2026-01-28 17:03:52.788288719 +0000 UTC m=+1736.346615607" observedRunningTime="2026-01-28 17:03:54.552130878 +0000 UTC m=+1738.110457786" watchObservedRunningTime="2026-01-28 17:03:54.552802547 +0000 UTC m=+1738.111129435"
Jan 28 17:03:54 crc kubenswrapper[4877]: I0128 17:03:54.581871 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=4.49658543 podStartE2EDuration="9.581848569s" podCreationTimestamp="2026-01-28 17:03:45 +0000 UTC" firstStartedPulling="2026-01-28 17:03:47.680307397 +0000 UTC m=+1731.238634285" lastFinishedPulling="2026-01-28 17:03:52.765570536 +0000 UTC m=+1736.323897424" observedRunningTime="2026-01-28 17:03:54.568348706 +0000 UTC m=+1738.126675594" watchObservedRunningTime="2026-01-28 17:03:54.581848569 +0000 UTC m=+1738.140175457"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.335806 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.356018 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.356247 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.407855 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.524211 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5qfk\" (UniqueName: \"kubernetes.io/projected/bb1cf2c3-547c-4c1b-9977-b259e8db132a-kube-api-access-s5qfk\") pod \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\" (UID: \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\") "
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.525342 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb1cf2c3-547c-4c1b-9977-b259e8db132a-logs\") pod \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\" (UID: \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\") "
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.525391 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb1cf2c3-547c-4c1b-9977-b259e8db132a-combined-ca-bundle\") pod \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\" (UID: \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\") "
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.525540 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb1cf2c3-547c-4c1b-9977-b259e8db132a-config-data\") pod \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\" (UID: \"bb1cf2c3-547c-4c1b-9977-b259e8db132a\") "
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.525945 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb1cf2c3-547c-4c1b-9977-b259e8db132a-logs" (OuterVolumeSpecName: "logs") pod "bb1cf2c3-547c-4c1b-9977-b259e8db132a" (UID: "bb1cf2c3-547c-4c1b-9977-b259e8db132a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.527076 4877 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb1cf2c3-547c-4c1b-9977-b259e8db132a-logs\") on node \"crc\" DevicePath \"\""
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.532001 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb1cf2c3-547c-4c1b-9977-b259e8db132a-kube-api-access-s5qfk" (OuterVolumeSpecName: "kube-api-access-s5qfk") pod "bb1cf2c3-547c-4c1b-9977-b259e8db132a" (UID: "bb1cf2c3-547c-4c1b-9977-b259e8db132a"). InnerVolumeSpecName "kube-api-access-s5qfk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.552987 4877 generic.go:334] "Generic (PLEG): container finished" podID="bb1cf2c3-547c-4c1b-9977-b259e8db132a" containerID="14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8" exitCode=0
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.553030 4877 generic.go:334] "Generic (PLEG): container finished" podID="bb1cf2c3-547c-4c1b-9977-b259e8db132a" containerID="0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904" exitCode=143
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.554851 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.555830 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bb1cf2c3-547c-4c1b-9977-b259e8db132a","Type":"ContainerDied","Data":"14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8"}
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.555860 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bb1cf2c3-547c-4c1b-9977-b259e8db132a","Type":"ContainerDied","Data":"0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904"}
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.555872 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bb1cf2c3-547c-4c1b-9977-b259e8db132a","Type":"ContainerDied","Data":"2d3a47e1b9df7658a68d0da86e19eb8e63782f3b59ab3fce7704711c3fa5020c"}
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.555886 4877 scope.go:117] "RemoveContainer" containerID="14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.565977 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb1cf2c3-547c-4c1b-9977-b259e8db132a-config-data" (OuterVolumeSpecName: "config-data") pod "bb1cf2c3-547c-4c1b-9977-b259e8db132a" (UID: "bb1cf2c3-547c-4c1b-9977-b259e8db132a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.589238 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb1cf2c3-547c-4c1b-9977-b259e8db132a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bb1cf2c3-547c-4c1b-9977-b259e8db132a" (UID: "bb1cf2c3-547c-4c1b-9977-b259e8db132a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.610878 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.632976 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s5qfk\" (UniqueName: \"kubernetes.io/projected/bb1cf2c3-547c-4c1b-9977-b259e8db132a-kube-api-access-s5qfk\") on node \"crc\" DevicePath \"\""
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.633033 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb1cf2c3-547c-4c1b-9977-b259e8db132a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.633048 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb1cf2c3-547c-4c1b-9977-b259e8db132a-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.651592 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.738588 4877 scope.go:117] "RemoveContainer" containerID="0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.778064 4877 scope.go:117] "RemoveContainer" containerID="14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8"
Jan 28 17:03:55 crc kubenswrapper[4877]: E0128 17:03:55.779329 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8\": container with ID starting with 14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8 not found: ID does not exist" containerID="14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.779366 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8"} err="failed to get container status \"14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8\": rpc error: code = NotFound desc = could not find container \"14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8\": container with ID starting with 14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8 not found: ID does not exist"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.779393 4877 scope.go:117] "RemoveContainer" containerID="0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904"
Jan 28 17:03:55 crc kubenswrapper[4877]: E0128 17:03:55.779930 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904\": container with ID starting with 0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904 not found: ID does not exist" containerID="0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.779967 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904"} err="failed to get container status \"0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904\": rpc error: code = NotFound desc = could not find container \"0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904\": container with ID starting with 0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904 not found: ID does not exist"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.779987 4877 scope.go:117] "RemoveContainer" containerID="14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.780363 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8"} err="failed to get container status \"14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8\": rpc error: code = NotFound desc = could not find container \"14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8\": container with ID starting with 14c3cee1c0ae1afac8fe548996f4e2f77d3f9c9cfef1615c91634ebaa68553b8 not found: ID does not exist"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.780386 4877 scope.go:117] "RemoveContainer" containerID="0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.780707 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904"} err="failed to get container status \"0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904\": rpc error: code = NotFound desc = could not find container \"0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904\": container with ID starting with 0014de3fd2d781025a74175f3fb54c83b182c9f8fb193d3830b79de867035904 not found: ID does not exist"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.896726 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.908937 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.919991 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 17:03:55 crc kubenswrapper[4877]: E0128 17:03:55.920763 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb1cf2c3-547c-4c1b-9977-b259e8db132a" containerName="nova-metadata-log"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.920785 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb1cf2c3-547c-4c1b-9977-b259e8db132a" containerName="nova-metadata-log"
Jan 28 17:03:55 crc kubenswrapper[4877]: E0128 17:03:55.920846 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb1cf2c3-547c-4c1b-9977-b259e8db132a" containerName="nova-metadata-metadata"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.920854 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb1cf2c3-547c-4c1b-9977-b259e8db132a" containerName="nova-metadata-metadata"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.921082 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb1cf2c3-547c-4c1b-9977-b259e8db132a" containerName="nova-metadata-log"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.921100 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb1cf2c3-547c-4c1b-9977-b259e8db132a" containerName="nova-metadata-metadata"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.922402 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.924970 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.925263 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Jan 28 17:03:55 crc kubenswrapper[4877]: I0128 17:03:55.937656 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.045184 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8be733b9-ce02-4f46-a417-ce3d66f58aab-logs\") pod \"nova-metadata-0\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " pod="openstack/nova-metadata-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.045382 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-config-data\") pod \"nova-metadata-0\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " pod="openstack/nova-metadata-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.045438 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " pod="openstack/nova-metadata-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.045502 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " pod="openstack/nova-metadata-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.045640 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grl5n\" (UniqueName: \"kubernetes.io/projected/8be733b9-ce02-4f46-a417-ce3d66f58aab-kube-api-access-grl5n\") pod \"nova-metadata-0\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " pod="openstack/nova-metadata-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.056846 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.056912 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.148369 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8be733b9-ce02-4f46-a417-ce3d66f58aab-logs\") pod \"nova-metadata-0\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " pod="openstack/nova-metadata-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.148828 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8be733b9-ce02-4f46-a417-ce3d66f58aab-logs\") pod \"nova-metadata-0\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " pod="openstack/nova-metadata-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.149141 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-config-data\") pod \"nova-metadata-0\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " pod="openstack/nova-metadata-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.149901 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " pod="openstack/nova-metadata-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.150258 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " pod="openstack/nova-metadata-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.150490 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grl5n\" (UniqueName: \"kubernetes.io/projected/8be733b9-ce02-4f46-a417-ce3d66f58aab-kube-api-access-grl5n\") pod \"nova-metadata-0\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " pod="openstack/nova-metadata-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.154997 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " pod="openstack/nova-metadata-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.155537 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-config-data\") pod \"nova-metadata-0\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " pod="openstack/nova-metadata-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.169823 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grl5n\" (UniqueName: \"kubernetes.io/projected/8be733b9-ce02-4f46-a417-ce3d66f58aab-kube-api-access-grl5n\") pod \"nova-metadata-0\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " pod="openstack/nova-metadata-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.173462 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " pod="openstack/nova-metadata-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.240499 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 28 17:03:56 crc kubenswrapper[4877]: I0128 17:03:56.960435 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 17:03:56 crc kubenswrapper[4877]: W0128 17:03:56.974558 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8be733b9_ce02_4f46_a417_ce3d66f58aab.slice/crio-411f0b7c87bdd79d3c13e1ba3ee3e7316dd00867a3ce4e6a714a03ff671ef7db WatchSource:0}: Error finding container 411f0b7c87bdd79d3c13e1ba3ee3e7316dd00867a3ce4e6a714a03ff671ef7db: Status 404 returned error can't find the container with id 411f0b7c87bdd79d3c13e1ba3ee3e7316dd00867a3ce4e6a714a03ff671ef7db
Jan 28 17:03:57 crc kubenswrapper[4877]: I0128 17:03:57.139741 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="77ef8d5f-555c-4e83-ad96-9d63b95c3684" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.245:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 17:03:57 crc kubenswrapper[4877]: I0128 17:03:57.139769 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="77ef8d5f-555c-4e83-ad96-9d63b95c3684" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.245:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 17:03:57 crc kubenswrapper[4877]: I0128 17:03:57.347511 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82"
Jan 28 17:03:57 crc kubenswrapper[4877]: E0128 17:03:57.352020 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:03:57 crc kubenswrapper[4877]: I0128 17:03:57.368921 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb1cf2c3-547c-4c1b-9977-b259e8db132a" path="/var/lib/kubelet/pods/bb1cf2c3-547c-4c1b-9977-b259e8db132a/volumes"
Jan 28 17:03:57 crc kubenswrapper[4877]: I0128 17:03:57.598117 4877 generic.go:334] "Generic (PLEG): container finished" podID="e5b7f132-af69-43a8-8771-c039c8039a35" containerID="97f20036790abdca2e5eceae7ba75fd06efad3ef727f8220c097ad21982b0b86" exitCode=0
Jan 28 17:03:57 crc kubenswrapper[4877]: I0128 17:03:57.598219 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-lpr6d" event={"ID":"e5b7f132-af69-43a8-8771-c039c8039a35","Type":"ContainerDied","Data":"97f20036790abdca2e5eceae7ba75fd06efad3ef727f8220c097ad21982b0b86"}
Jan 28 17:03:57 crc kubenswrapper[4877]: I0128 17:03:57.602620 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8be733b9-ce02-4f46-a417-ce3d66f58aab","Type":"ContainerStarted","Data":"327b181becbe2a488a11c6dbf2cb97ca7e5c9e5fd60c1e3ee4bfffebcb985f14"}
Jan 28 17:03:57 crc kubenswrapper[4877]: I0128 17:03:57.602681 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8be733b9-ce02-4f46-a417-ce3d66f58aab","Type":"ContainerStarted","Data":"411f0b7c87bdd79d3c13e1ba3ee3e7316dd00867a3ce4e6a714a03ff671ef7db"}
Jan 28 17:03:58 crc kubenswrapper[4877]: I0128 17:03:58.618935 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8be733b9-ce02-4f46-a417-ce3d66f58aab","Type":"ContainerStarted","Data":"8e808207def126e340c4b3b19c421931bf43ea3890bc0a383c62cf8c721b4fea"}
Jan 28 17:03:58 crc kubenswrapper[4877]: I0128 17:03:58.641987 4877 generic.go:334] "Generic (PLEG): container finished" podID="e4954890-1a90-4904-a7d8-f286d1b56745" containerID="05cf205cb7271560cfdd0cac3ffd18218092b8634d3059295d86db5e3ec969aa" exitCode=0
Jan 28 17:03:58 crc kubenswrapper[4877]: I0128 17:03:58.642304 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-f5bgl" event={"ID":"e4954890-1a90-4904-a7d8-f286d1b56745","Type":"ContainerDied","Data":"05cf205cb7271560cfdd0cac3ffd18218092b8634d3059295d86db5e3ec969aa"}
Jan 28 17:03:58 crc kubenswrapper[4877]: I0128 17:03:58.669070 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.669035658 podStartE2EDuration="3.669035658s" podCreationTimestamp="2026-01-28 17:03:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:03:58.646144122 +0000 UTC m=+1742.204471030" watchObservedRunningTime="2026-01-28 17:03:58.669035658 +0000 UTC m=+1742.227362546"
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.214399 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-lpr6d"
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.335951 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-scripts\") pod \"e5b7f132-af69-43a8-8771-c039c8039a35\" (UID: \"e5b7f132-af69-43a8-8771-c039c8039a35\") "
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.336063 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-combined-ca-bundle\") pod \"e5b7f132-af69-43a8-8771-c039c8039a35\" (UID: \"e5b7f132-af69-43a8-8771-c039c8039a35\") "
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.336194 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnhrx\" (UniqueName: \"kubernetes.io/projected/e5b7f132-af69-43a8-8771-c039c8039a35-kube-api-access-hnhrx\") pod \"e5b7f132-af69-43a8-8771-c039c8039a35\" (UID: \"e5b7f132-af69-43a8-8771-c039c8039a35\") "
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.336256 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-config-data\") pod \"e5b7f132-af69-43a8-8771-c039c8039a35\" (UID: \"e5b7f132-af69-43a8-8771-c039c8039a35\") "
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.344272 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5b7f132-af69-43a8-8771-c039c8039a35-kube-api-access-hnhrx" (OuterVolumeSpecName: "kube-api-access-hnhrx") pod "e5b7f132-af69-43a8-8771-c039c8039a35" (UID: "e5b7f132-af69-43a8-8771-c039c8039a35"). InnerVolumeSpecName "kube-api-access-hnhrx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.347533 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-scripts" (OuterVolumeSpecName: "scripts") pod "e5b7f132-af69-43a8-8771-c039c8039a35" (UID: "e5b7f132-af69-43a8-8771-c039c8039a35"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.378526 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-config-data" (OuterVolumeSpecName: "config-data") pod "e5b7f132-af69-43a8-8771-c039c8039a35" (UID: "e5b7f132-af69-43a8-8771-c039c8039a35"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.381907 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e5b7f132-af69-43a8-8771-c039c8039a35" (UID: "e5b7f132-af69-43a8-8771-c039c8039a35"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.440668 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnhrx\" (UniqueName: \"kubernetes.io/projected/e5b7f132-af69-43a8-8771-c039c8039a35-kube-api-access-hnhrx\") on node \"crc\" DevicePath \"\""
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.440715 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.440733 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.440745 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5b7f132-af69-43a8-8771-c039c8039a35-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.656290 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-lpr6d" event={"ID":"e5b7f132-af69-43a8-8771-c039c8039a35","Type":"ContainerDied","Data":"2486e9d8170a0b6dafccce734d1b48cac7c3dac1483266b420dcd9f01fb176b8"}
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.656365 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2486e9d8170a0b6dafccce734d1b48cac7c3dac1483266b420dcd9f01fb176b8"
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.657306 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-lpr6d"
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.735564 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.735878 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2" containerName="nova-scheduler-scheduler" containerID="cri-o://ace3c78a7c74d3425e4c0e417e939d1e77b6cabf70413aac222a8e888ca49cda" gracePeriod=30
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.748241 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.748642 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="77ef8d5f-555c-4e83-ad96-9d63b95c3684" containerName="nova-api-log" containerID="cri-o://da62c083db84e6a1f34a1aefd714ad86e2f97f928942dcc9b49e2d1a8c014c04" gracePeriod=30
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.748701 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="77ef8d5f-555c-4e83-ad96-9d63b95c3684" containerName="nova-api-api" containerID="cri-o://d46653cc79e460941ecff85013bf4237dc6f1962f13b3ce9bacdb467b2f550da" gracePeriod=30
Jan 28 17:03:59 crc kubenswrapper[4877]: I0128 17:03:59.788234 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.022351 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-f5bgl"
Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.157923 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghbnh\" (UniqueName: \"kubernetes.io/projected/e4954890-1a90-4904-a7d8-f286d1b56745-kube-api-access-ghbnh\") pod \"e4954890-1a90-4904-a7d8-f286d1b56745\" (UID: \"e4954890-1a90-4904-a7d8-f286d1b56745\") "
Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.158011 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-scripts\") pod \"e4954890-1a90-4904-a7d8-f286d1b56745\" (UID: \"e4954890-1a90-4904-a7d8-f286d1b56745\") "
Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.158179 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-config-data\") pod \"e4954890-1a90-4904-a7d8-f286d1b56745\" (UID: \"e4954890-1a90-4904-a7d8-f286d1b56745\") "
Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.159079 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-combined-ca-bundle\") pod \"e4954890-1a90-4904-a7d8-f286d1b56745\" (UID: \"e4954890-1a90-4904-a7d8-f286d1b56745\") "
Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.179430 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4954890-1a90-4904-a7d8-f286d1b56745-kube-api-access-ghbnh" (OuterVolumeSpecName: "kube-api-access-ghbnh") pod "e4954890-1a90-4904-a7d8-f286d1b56745" (UID: "e4954890-1a90-4904-a7d8-f286d1b56745").
InnerVolumeSpecName "kube-api-access-ghbnh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.185975 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-scripts" (OuterVolumeSpecName: "scripts") pod "e4954890-1a90-4904-a7d8-f286d1b56745" (UID: "e4954890-1a90-4904-a7d8-f286d1b56745"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.204374 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e4954890-1a90-4904-a7d8-f286d1b56745" (UID: "e4954890-1a90-4904-a7d8-f286d1b56745"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.208315 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-config-data" (OuterVolumeSpecName: "config-data") pod "e4954890-1a90-4904-a7d8-f286d1b56745" (UID: "e4954890-1a90-4904-a7d8-f286d1b56745"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.262529 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghbnh\" (UniqueName: \"kubernetes.io/projected/e4954890-1a90-4904-a7d8-f286d1b56745-kube-api-access-ghbnh\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.262566 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.262576 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.262585 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4954890-1a90-4904-a7d8-f286d1b56745-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:00 crc kubenswrapper[4877]: E0128 17:04:00.358102 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ace3c78a7c74d3425e4c0e417e939d1e77b6cabf70413aac222a8e888ca49cda" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 28 17:04:00 crc kubenswrapper[4877]: E0128 17:04:00.361530 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ace3c78a7c74d3425e4c0e417e939d1e77b6cabf70413aac222a8e888ca49cda" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 28 17:04:00 crc kubenswrapper[4877]: E0128 17:04:00.363057 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="ace3c78a7c74d3425e4c0e417e939d1e77b6cabf70413aac222a8e888ca49cda" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 28 17:04:00 crc kubenswrapper[4877]: E0128 17:04:00.363110 4877 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2" containerName="nova-scheduler-scheduler" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.670161 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-f5bgl" event={"ID":"e4954890-1a90-4904-a7d8-f286d1b56745","Type":"ContainerDied","Data":"b89afaf5b7c8b1a6fa9758a968da016004140cf553977a5476febe7b0fccfdc4"} Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.670210 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b89afaf5b7c8b1a6fa9758a968da016004140cf553977a5476febe7b0fccfdc4" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.670273 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-f5bgl" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.676505 4877 generic.go:334] "Generic (PLEG): container finished" podID="77ef8d5f-555c-4e83-ad96-9d63b95c3684" containerID="da62c083db84e6a1f34a1aefd714ad86e2f97f928942dcc9b49e2d1a8c014c04" exitCode=143 Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.676602 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"77ef8d5f-555c-4e83-ad96-9d63b95c3684","Type":"ContainerDied","Data":"da62c083db84e6a1f34a1aefd714ad86e2f97f928942dcc9b49e2d1a8c014c04"} Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.676812 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8be733b9-ce02-4f46-a417-ce3d66f58aab" containerName="nova-metadata-log" containerID="cri-o://327b181becbe2a488a11c6dbf2cb97ca7e5c9e5fd60c1e3ee4bfffebcb985f14" gracePeriod=30 Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.676956 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8be733b9-ce02-4f46-a417-ce3d66f58aab" containerName="nova-metadata-metadata" containerID="cri-o://8e808207def126e340c4b3b19c421931bf43ea3890bc0a383c62cf8c721b4fea" gracePeriod=30 Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.755558 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 28 17:04:00 crc kubenswrapper[4877]: E0128 17:04:00.756132 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5b7f132-af69-43a8-8771-c039c8039a35" containerName="nova-manage" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.756152 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5b7f132-af69-43a8-8771-c039c8039a35" containerName="nova-manage" Jan 28 17:04:00 crc kubenswrapper[4877]: E0128 17:04:00.756183 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4954890-1a90-4904-a7d8-f286d1b56745" containerName="nova-cell1-conductor-db-sync" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.756190 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4954890-1a90-4904-a7d8-f286d1b56745" containerName="nova-cell1-conductor-db-sync" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.756416 4877 
memory_manager.go:354] "RemoveStaleState removing state" podUID="e5b7f132-af69-43a8-8771-c039c8039a35" containerName="nova-manage" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.756444 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4954890-1a90-4904-a7d8-f286d1b56745" containerName="nova-cell1-conductor-db-sync" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.757359 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.762535 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.775787 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.878198 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c7a0465-0804-4991-aa52-01b146083b2e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"0c7a0465-0804-4991-aa52-01b146083b2e\") " pod="openstack/nova-cell1-conductor-0" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.878551 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c7a0465-0804-4991-aa52-01b146083b2e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"0c7a0465-0804-4991-aa52-01b146083b2e\") " pod="openstack/nova-cell1-conductor-0" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.879240 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvhxl\" (UniqueName: \"kubernetes.io/projected/0c7a0465-0804-4991-aa52-01b146083b2e-kube-api-access-zvhxl\") pod \"nova-cell1-conductor-0\" (UID: \"0c7a0465-0804-4991-aa52-01b146083b2e\") " pod="openstack/nova-cell1-conductor-0" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.981921 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvhxl\" (UniqueName: \"kubernetes.io/projected/0c7a0465-0804-4991-aa52-01b146083b2e-kube-api-access-zvhxl\") pod \"nova-cell1-conductor-0\" (UID: \"0c7a0465-0804-4991-aa52-01b146083b2e\") " pod="openstack/nova-cell1-conductor-0" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.982017 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c7a0465-0804-4991-aa52-01b146083b2e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"0c7a0465-0804-4991-aa52-01b146083b2e\") " pod="openstack/nova-cell1-conductor-0" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.982038 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c7a0465-0804-4991-aa52-01b146083b2e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"0c7a0465-0804-4991-aa52-01b146083b2e\") " pod="openstack/nova-cell1-conductor-0" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.986279 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c7a0465-0804-4991-aa52-01b146083b2e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"0c7a0465-0804-4991-aa52-01b146083b2e\") " 
pod="openstack/nova-cell1-conductor-0" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.986771 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c7a0465-0804-4991-aa52-01b146083b2e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"0c7a0465-0804-4991-aa52-01b146083b2e\") " pod="openstack/nova-cell1-conductor-0" Jan 28 17:04:00 crc kubenswrapper[4877]: I0128 17:04:00.998001 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvhxl\" (UniqueName: \"kubernetes.io/projected/0c7a0465-0804-4991-aa52-01b146083b2e-kube-api-access-zvhxl\") pod \"nova-cell1-conductor-0\" (UID: \"0c7a0465-0804-4991-aa52-01b146083b2e\") " pod="openstack/nova-cell1-conductor-0" Jan 28 17:04:01 crc kubenswrapper[4877]: I0128 17:04:01.084226 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 28 17:04:01 crc kubenswrapper[4877]: I0128 17:04:01.128634 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5" Jan 28 17:04:01 crc kubenswrapper[4877]: I0128 17:04:01.240775 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 28 17:04:01 crc kubenswrapper[4877]: I0128 17:04:01.241930 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 28 17:04:01 crc kubenswrapper[4877]: I0128 17:04:01.284047 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-xngcd"] Jan 28 17:04:01 crc kubenswrapper[4877]: I0128 17:04:01.291090 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd" podUID="3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5" containerName="dnsmasq-dns" containerID="cri-o://bfe39903a2895869c905d25d49dc367f979b5189fd0b55c77792cb7296f78e17" gracePeriod=10 Jan 28 17:04:01 crc kubenswrapper[4877]: I0128 17:04:01.678840 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 28 17:04:01 crc kubenswrapper[4877]: I0128 17:04:01.695059 4877 generic.go:334] "Generic (PLEG): container finished" podID="8be733b9-ce02-4f46-a417-ce3d66f58aab" containerID="8e808207def126e340c4b3b19c421931bf43ea3890bc0a383c62cf8c721b4fea" exitCode=0 Jan 28 17:04:01 crc kubenswrapper[4877]: I0128 17:04:01.695101 4877 generic.go:334] "Generic (PLEG): container finished" podID="8be733b9-ce02-4f46-a417-ce3d66f58aab" containerID="327b181becbe2a488a11c6dbf2cb97ca7e5c9e5fd60c1e3ee4bfffebcb985f14" exitCode=143 Jan 28 17:04:01 crc kubenswrapper[4877]: I0128 17:04:01.695096 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8be733b9-ce02-4f46-a417-ce3d66f58aab","Type":"ContainerDied","Data":"8e808207def126e340c4b3b19c421931bf43ea3890bc0a383c62cf8c721b4fea"} Jan 28 17:04:01 crc kubenswrapper[4877]: I0128 17:04:01.695139 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8be733b9-ce02-4f46-a417-ce3d66f58aab","Type":"ContainerDied","Data":"327b181becbe2a488a11c6dbf2cb97ca7e5c9e5fd60c1e3ee4bfffebcb985f14"} Jan 28 17:04:01 crc kubenswrapper[4877]: I0128 17:04:01.699056 4877 generic.go:334] "Generic (PLEG): container finished" podID="3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5" containerID="bfe39903a2895869c905d25d49dc367f979b5189fd0b55c77792cb7296f78e17" exitCode=0 Jan 28 17:04:01 crc 
kubenswrapper[4877]: I0128 17:04:01.699092 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd" event={"ID":"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5","Type":"ContainerDied","Data":"bfe39903a2895869c905d25d49dc367f979b5189fd0b55c77792cb7296f78e17"} Jan 28 17:04:01 crc kubenswrapper[4877]: I0128 17:04:01.925105 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.033283 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-ovsdbserver-sb\") pod \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.033414 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-dns-svc\") pod \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.033602 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6wds\" (UniqueName: \"kubernetes.io/projected/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-kube-api-access-c6wds\") pod \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.033696 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-ovsdbserver-nb\") pod \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.033885 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-config\") pod \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.034053 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-dns-swift-storage-0\") pod \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\" (UID: \"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5\") " Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.041391 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-kube-api-access-c6wds" (OuterVolumeSpecName: "kube-api-access-c6wds") pod "3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5" (UID: "3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5"). InnerVolumeSpecName "kube-api-access-c6wds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.109400 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5" (UID: "3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.138267 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6wds\" (UniqueName: \"kubernetes.io/projected/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-kube-api-access-c6wds\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.138307 4877 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.175741 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5" (UID: "3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.190863 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-config" (OuterVolumeSpecName: "config") pod "3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5" (UID: "3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.233014 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5" (UID: "3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.240856 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.240895 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.240908 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-config\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.253941 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5" (UID: "3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.343095 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.372200 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.444858 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grl5n\" (UniqueName: \"kubernetes.io/projected/8be733b9-ce02-4f46-a417-ce3d66f58aab-kube-api-access-grl5n\") pod \"8be733b9-ce02-4f46-a417-ce3d66f58aab\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.445011 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8be733b9-ce02-4f46-a417-ce3d66f58aab-logs\") pod \"8be733b9-ce02-4f46-a417-ce3d66f58aab\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.445116 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-combined-ca-bundle\") pod \"8be733b9-ce02-4f46-a417-ce3d66f58aab\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.445180 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-config-data\") pod \"8be733b9-ce02-4f46-a417-ce3d66f58aab\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.445247 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-nova-metadata-tls-certs\") pod \"8be733b9-ce02-4f46-a417-ce3d66f58aab\" (UID: \"8be733b9-ce02-4f46-a417-ce3d66f58aab\") " Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.446899 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8be733b9-ce02-4f46-a417-ce3d66f58aab-logs" (OuterVolumeSpecName: "logs") pod "8be733b9-ce02-4f46-a417-ce3d66f58aab" (UID: "8be733b9-ce02-4f46-a417-ce3d66f58aab"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.451631 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8be733b9-ce02-4f46-a417-ce3d66f58aab-kube-api-access-grl5n" (OuterVolumeSpecName: "kube-api-access-grl5n") pod "8be733b9-ce02-4f46-a417-ce3d66f58aab" (UID: "8be733b9-ce02-4f46-a417-ce3d66f58aab"). InnerVolumeSpecName "kube-api-access-grl5n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.491129 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-config-data" (OuterVolumeSpecName: "config-data") pod "8be733b9-ce02-4f46-a417-ce3d66f58aab" (UID: "8be733b9-ce02-4f46-a417-ce3d66f58aab"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.535637 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8be733b9-ce02-4f46-a417-ce3d66f58aab" (UID: "8be733b9-ce02-4f46-a417-ce3d66f58aab"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.548527 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grl5n\" (UniqueName: \"kubernetes.io/projected/8be733b9-ce02-4f46-a417-ce3d66f58aab-kube-api-access-grl5n\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.548646 4877 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8be733b9-ce02-4f46-a417-ce3d66f58aab-logs\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.548659 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.548669 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.564589 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "8be733b9-ce02-4f46-a417-ce3d66f58aab" (UID: "8be733b9-ce02-4f46-a417-ce3d66f58aab"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.651289 4877 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8be733b9-ce02-4f46-a417-ce3d66f58aab-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.716588 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"0c7a0465-0804-4991-aa52-01b146083b2e","Type":"ContainerStarted","Data":"edf9cf99de6a80ee5ad65c40f3dd371ba96dd7a573fa083efcbf73f521e2ef52"} Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.716645 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"0c7a0465-0804-4991-aa52-01b146083b2e","Type":"ContainerStarted","Data":"3dfa070fc0e05f614961d7e3e8f539d9c5c4cf08acb245ac6661e5902bd9971f"} Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.717167 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.720744 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd" event={"ID":"3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5","Type":"ContainerDied","Data":"1ea38fc69c7b481dec51d59ba0201d5e7212d5abdeda5f26a0fea425e607ef08"} Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.720816 4877 scope.go:117] "RemoveContainer" containerID="bfe39903a2895869c905d25d49dc367f979b5189fd0b55c77792cb7296f78e17" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.720742 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-xngcd" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.728943 4877 generic.go:334] "Generic (PLEG): container finished" podID="3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2" containerID="ace3c78a7c74d3425e4c0e417e939d1e77b6cabf70413aac222a8e888ca49cda" exitCode=0 Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.729032 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2","Type":"ContainerDied","Data":"ace3c78a7c74d3425e4c0e417e939d1e77b6cabf70413aac222a8e888ca49cda"} Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.731423 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8be733b9-ce02-4f46-a417-ce3d66f58aab","Type":"ContainerDied","Data":"411f0b7c87bdd79d3c13e1ba3ee3e7316dd00867a3ce4e6a714a03ff671ef7db"} Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.731529 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.754091 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.75407096 podStartE2EDuration="2.75407096s" podCreationTimestamp="2026-01-28 17:04:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:04:02.742436997 +0000 UTC m=+1746.300763885" watchObservedRunningTime="2026-01-28 17:04:02.75407096 +0000 UTC m=+1746.312397848" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.788923 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-xngcd"] Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.806526 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-xngcd"] Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.828124 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.860065 4877 scope.go:117] "RemoveContainer" containerID="ad05e20ac7e9b3f29c72ff0a8c3b5998fc01159cb585aa14c538653ec622730e" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.860929 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.879173 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.881586 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 28 17:04:02 crc kubenswrapper[4877]: E0128 17:04:02.882080 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5" containerName="init" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.882101 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5" containerName="init" Jan 28 17:04:02 crc kubenswrapper[4877]: E0128 17:04:02.882119 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8be733b9-ce02-4f46-a417-ce3d66f58aab" containerName="nova-metadata-log" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.882127 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="8be733b9-ce02-4f46-a417-ce3d66f58aab" containerName="nova-metadata-log" Jan 28 17:04:02 crc kubenswrapper[4877]: E0128 17:04:02.882143 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8be733b9-ce02-4f46-a417-ce3d66f58aab" containerName="nova-metadata-metadata" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.882152 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="8be733b9-ce02-4f46-a417-ce3d66f58aab" containerName="nova-metadata-metadata" Jan 28 17:04:02 crc kubenswrapper[4877]: E0128 17:04:02.882180 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2" containerName="nova-scheduler-scheduler" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.882188 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2" containerName="nova-scheduler-scheduler" Jan 28 17:04:02 crc kubenswrapper[4877]: E0128 17:04:02.882246 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5" containerName="dnsmasq-dns" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.882255 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5" containerName="dnsmasq-dns" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.882567 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5" containerName="dnsmasq-dns" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.882591 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="8be733b9-ce02-4f46-a417-ce3d66f58aab" containerName="nova-metadata-metadata" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.882612 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="8be733b9-ce02-4f46-a417-ce3d66f58aab" containerName="nova-metadata-log" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.882633 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2" containerName="nova-scheduler-scheduler" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.884202 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.885992 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.886241 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.899989 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.963970 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rqvd\" (UniqueName: \"kubernetes.io/projected/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-kube-api-access-6rqvd\") pod \"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2\" (UID: \"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2\") " Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.964259 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-config-data\") pod \"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2\" (UID: \"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2\") " Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.964324 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-combined-ca-bundle\") pod \"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2\" (UID: \"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2\") " Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.964719 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " pod="openstack/nova-metadata-0" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.964753 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-config-data\") pod \"nova-metadata-0\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " pod="openstack/nova-metadata-0" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.964811 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhnl2\" (UniqueName: \"kubernetes.io/projected/617dd54a-0eba-4cb2-8108-11abfdc50ce9-kube-api-access-qhnl2\") pod \"nova-metadata-0\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " pod="openstack/nova-metadata-0" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.964876 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/617dd54a-0eba-4cb2-8108-11abfdc50ce9-logs\") pod \"nova-metadata-0\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " pod="openstack/nova-metadata-0" Jan 28 17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.965020 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " pod="openstack/nova-metadata-0" Jan 28 
17:04:02 crc kubenswrapper[4877]: I0128 17:04:02.970543 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-kube-api-access-6rqvd" (OuterVolumeSpecName: "kube-api-access-6rqvd") pod "3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2" (UID: "3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2"). InnerVolumeSpecName "kube-api-access-6rqvd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.003048 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-config-data" (OuterVolumeSpecName: "config-data") pod "3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2" (UID: "3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.013490 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2" (UID: "3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.070329 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " pod="openstack/nova-metadata-0" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.070408 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-config-data\") pod \"nova-metadata-0\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " pod="openstack/nova-metadata-0" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.070533 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhnl2\" (UniqueName: \"kubernetes.io/projected/617dd54a-0eba-4cb2-8108-11abfdc50ce9-kube-api-access-qhnl2\") pod \"nova-metadata-0\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " pod="openstack/nova-metadata-0" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.070674 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/617dd54a-0eba-4cb2-8108-11abfdc50ce9-logs\") pod \"nova-metadata-0\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " pod="openstack/nova-metadata-0" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.070875 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " pod="openstack/nova-metadata-0" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.071045 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rqvd\" (UniqueName: \"kubernetes.io/projected/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-kube-api-access-6rqvd\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.071095 4877 reconciler_common.go:293] "Volume detached for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.071110 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.073141 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/617dd54a-0eba-4cb2-8108-11abfdc50ce9-logs\") pod \"nova-metadata-0\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " pod="openstack/nova-metadata-0" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.074266 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " pod="openstack/nova-metadata-0" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.076416 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-config-data\") pod \"nova-metadata-0\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " pod="openstack/nova-metadata-0" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.082754 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " pod="openstack/nova-metadata-0" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.095365 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhnl2\" (UniqueName: \"kubernetes.io/projected/617dd54a-0eba-4cb2-8108-11abfdc50ce9-kube-api-access-qhnl2\") pod \"nova-metadata-0\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " pod="openstack/nova-metadata-0" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.097310 4877 scope.go:117] "RemoveContainer" containerID="8e808207def126e340c4b3b19c421931bf43ea3890bc0a383c62cf8c721b4fea" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.213432 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.387298 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5" path="/var/lib/kubelet/pods/3d5a5ba0-fa85-4ec4-ac7f-49a60528bde5/volumes" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.389248 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8be733b9-ce02-4f46-a417-ce3d66f58aab" path="/var/lib/kubelet/pods/8be733b9-ce02-4f46-a417-ce3d66f58aab/volumes" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.676605 4877 scope.go:117] "RemoveContainer" containerID="327b181becbe2a488a11c6dbf2cb97ca7e5c9e5fd60c1e3ee4bfffebcb985f14" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.786446 4877 generic.go:334] "Generic (PLEG): container finished" podID="77ef8d5f-555c-4e83-ad96-9d63b95c3684" containerID="d46653cc79e460941ecff85013bf4237dc6f1962f13b3ce9bacdb467b2f550da" exitCode=0 Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.786540 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"77ef8d5f-555c-4e83-ad96-9d63b95c3684","Type":"ContainerDied","Data":"d46653cc79e460941ecff85013bf4237dc6f1962f13b3ce9bacdb467b2f550da"} Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.795567 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.795566 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2","Type":"ContainerDied","Data":"aa5c1ec95591ff8bb5b017cb55e428044bd0b275db37e7c1314e16eba90d2306"} Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.795767 4877 scope.go:117] "RemoveContainer" containerID="ace3c78a7c74d3425e4c0e417e939d1e77b6cabf70413aac222a8e888ca49cda" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.848449 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.850698 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.876176 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.894766 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 17:04:03 crc kubenswrapper[4877]: E0128 17:04:03.895411 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77ef8d5f-555c-4e83-ad96-9d63b95c3684" containerName="nova-api-log" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.895440 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="77ef8d5f-555c-4e83-ad96-9d63b95c3684" containerName="nova-api-log" Jan 28 17:04:03 crc kubenswrapper[4877]: E0128 17:04:03.895466 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77ef8d5f-555c-4e83-ad96-9d63b95c3684" containerName="nova-api-api" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.895623 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="77ef8d5f-555c-4e83-ad96-9d63b95c3684" containerName="nova-api-api" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.895917 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="77ef8d5f-555c-4e83-ad96-9d63b95c3684" containerName="nova-api-api" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.895948 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="77ef8d5f-555c-4e83-ad96-9d63b95c3684" containerName="nova-api-log" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.896988 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.900898 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 28 17:04:03 crc kubenswrapper[4877]: I0128 17:04:03.925499 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.026700 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77ef8d5f-555c-4e83-ad96-9d63b95c3684-logs\") pod \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\" (UID: \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\") " Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.026795 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77ef8d5f-555c-4e83-ad96-9d63b95c3684-config-data\") pod \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\" (UID: \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\") " Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.026939 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ef8d5f-555c-4e83-ad96-9d63b95c3684-combined-ca-bundle\") pod \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\" (UID: \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\") " Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.026990 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcx8w\" (UniqueName: \"kubernetes.io/projected/77ef8d5f-555c-4e83-ad96-9d63b95c3684-kube-api-access-fcx8w\") pod \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\" (UID: \"77ef8d5f-555c-4e83-ad96-9d63b95c3684\") " Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.027588 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"72716d2a-bddd-4fdf-99ac-6dbca5f41cae\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.027822 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-config-data\") pod \"nova-scheduler-0\" (UID: \"72716d2a-bddd-4fdf-99ac-6dbca5f41cae\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.027892 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdddx\" (UniqueName: \"kubernetes.io/projected/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-kube-api-access-wdddx\") pod \"nova-scheduler-0\" (UID: \"72716d2a-bddd-4fdf-99ac-6dbca5f41cae\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.027975 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77ef8d5f-555c-4e83-ad96-9d63b95c3684-logs" (OuterVolumeSpecName: "logs") pod "77ef8d5f-555c-4e83-ad96-9d63b95c3684" (UID: "77ef8d5f-555c-4e83-ad96-9d63b95c3684"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.032800 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77ef8d5f-555c-4e83-ad96-9d63b95c3684-kube-api-access-fcx8w" (OuterVolumeSpecName: "kube-api-access-fcx8w") pod "77ef8d5f-555c-4e83-ad96-9d63b95c3684" (UID: "77ef8d5f-555c-4e83-ad96-9d63b95c3684"). InnerVolumeSpecName "kube-api-access-fcx8w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.058265 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77ef8d5f-555c-4e83-ad96-9d63b95c3684-config-data" (OuterVolumeSpecName: "config-data") pod "77ef8d5f-555c-4e83-ad96-9d63b95c3684" (UID: "77ef8d5f-555c-4e83-ad96-9d63b95c3684"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.074888 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77ef8d5f-555c-4e83-ad96-9d63b95c3684-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "77ef8d5f-555c-4e83-ad96-9d63b95c3684" (UID: "77ef8d5f-555c-4e83-ad96-9d63b95c3684"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.130321 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"72716d2a-bddd-4fdf-99ac-6dbca5f41cae\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.130523 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-config-data\") pod \"nova-scheduler-0\" (UID: \"72716d2a-bddd-4fdf-99ac-6dbca5f41cae\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.130565 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdddx\" (UniqueName: \"kubernetes.io/projected/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-kube-api-access-wdddx\") pod \"nova-scheduler-0\" (UID: \"72716d2a-bddd-4fdf-99ac-6dbca5f41cae\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.130767 4877 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77ef8d5f-555c-4e83-ad96-9d63b95c3684-logs\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.130789 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77ef8d5f-555c-4e83-ad96-9d63b95c3684-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.130801 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ef8d5f-555c-4e83-ad96-9d63b95c3684-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.130815 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcx8w\" (UniqueName: \"kubernetes.io/projected/77ef8d5f-555c-4e83-ad96-9d63b95c3684-kube-api-access-fcx8w\") on node \"crc\" DevicePath 
\"\"" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.136430 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-config-data\") pod \"nova-scheduler-0\" (UID: \"72716d2a-bddd-4fdf-99ac-6dbca5f41cae\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.139126 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"72716d2a-bddd-4fdf-99ac-6dbca5f41cae\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.150875 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdddx\" (UniqueName: \"kubernetes.io/projected/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-kube-api-access-wdddx\") pod \"nova-scheduler-0\" (UID: \"72716d2a-bddd-4fdf-99ac-6dbca5f41cae\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.169672 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.254097 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.797354 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.811004 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"617dd54a-0eba-4cb2-8108-11abfdc50ce9","Type":"ContainerStarted","Data":"b34f14fdb2ee6a8fb522d4f3fddf57023f6f701cc02b1a570411f4cd1192e82b"} Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.811074 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"617dd54a-0eba-4cb2-8108-11abfdc50ce9","Type":"ContainerStarted","Data":"78580d8da4f9e778a21362e0f8fe231391e17f17cd90aa7f0185fcc0384b230e"} Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.811085 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"617dd54a-0eba-4cb2-8108-11abfdc50ce9","Type":"ContainerStarted","Data":"4b763789601210b42716afcd678afc26e9bc7592fafad640d6ef334c2b35dc72"} Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.815859 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.816349 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"77ef8d5f-555c-4e83-ad96-9d63b95c3684","Type":"ContainerDied","Data":"b4d642541c8fe9bde3ec2db698656a6b187be6592896ba4cfff0d3c3b7b29e30"} Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.816425 4877 scope.go:117] "RemoveContainer" containerID="d46653cc79e460941ecff85013bf4237dc6f1962f13b3ce9bacdb467b2f550da" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.851030 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.851011438 podStartE2EDuration="2.851011438s" podCreationTimestamp="2026-01-28 17:04:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:04:04.837645707 +0000 UTC m=+1748.395972605" watchObservedRunningTime="2026-01-28 17:04:04.851011438 +0000 UTC m=+1748.409338326" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.884681 4877 scope.go:117] "RemoveContainer" containerID="da62c083db84e6a1f34a1aefd714ad86e2f97f928942dcc9b49e2d1a8c014c04" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.899582 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.920948 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.957002 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.959056 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.963611 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 28 17:04:04 crc kubenswrapper[4877]: I0128 17:04:04.970785 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.060658 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbqfq\" (UniqueName: \"kubernetes.io/projected/5886ef03-0905-4c4c-89cf-7de7804f8e5a-kube-api-access-bbqfq\") pod \"nova-api-0\" (UID: \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\") " pod="openstack/nova-api-0" Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.060850 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5886ef03-0905-4c4c-89cf-7de7804f8e5a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\") " pod="openstack/nova-api-0" Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.060955 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5886ef03-0905-4c4c-89cf-7de7804f8e5a-logs\") pod \"nova-api-0\" (UID: \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\") " pod="openstack/nova-api-0" Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.061018 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5886ef03-0905-4c4c-89cf-7de7804f8e5a-config-data\") pod \"nova-api-0\" (UID: \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\") " pod="openstack/nova-api-0" Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.162791 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5886ef03-0905-4c4c-89cf-7de7804f8e5a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\") " pod="openstack/nova-api-0" Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.162906 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5886ef03-0905-4c4c-89cf-7de7804f8e5a-logs\") pod \"nova-api-0\" (UID: \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\") " pod="openstack/nova-api-0" Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.162947 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5886ef03-0905-4c4c-89cf-7de7804f8e5a-config-data\") pod \"nova-api-0\" (UID: \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\") " pod="openstack/nova-api-0" Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.163148 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbqfq\" (UniqueName: \"kubernetes.io/projected/5886ef03-0905-4c4c-89cf-7de7804f8e5a-kube-api-access-bbqfq\") pod \"nova-api-0\" (UID: \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\") " pod="openstack/nova-api-0" Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.163765 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5886ef03-0905-4c4c-89cf-7de7804f8e5a-logs\") pod \"nova-api-0\" (UID: \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\") " 
pod="openstack/nova-api-0" Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.170741 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5886ef03-0905-4c4c-89cf-7de7804f8e5a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\") " pod="openstack/nova-api-0" Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.170911 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5886ef03-0905-4c4c-89cf-7de7804f8e5a-config-data\") pod \"nova-api-0\" (UID: \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\") " pod="openstack/nova-api-0" Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.181728 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbqfq\" (UniqueName: \"kubernetes.io/projected/5886ef03-0905-4c4c-89cf-7de7804f8e5a-kube-api-access-bbqfq\") pod \"nova-api-0\" (UID: \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\") " pod="openstack/nova-api-0" Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.281127 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.346433 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2" path="/var/lib/kubelet/pods/3fa9f20a-6b0c-46f8-8bec-776e68ee5fb2/volumes" Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.347429 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77ef8d5f-555c-4e83-ad96-9d63b95c3684" path="/var/lib/kubelet/pods/77ef8d5f-555c-4e83-ad96-9d63b95c3684/volumes" Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.798700 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.837418 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"72716d2a-bddd-4fdf-99ac-6dbca5f41cae","Type":"ContainerStarted","Data":"3920c10d79d32be642d0c8d41a9eaf96f6afde3eca528e4dd617b9a9192b2ed9"} Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.837507 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"72716d2a-bddd-4fdf-99ac-6dbca5f41cae","Type":"ContainerStarted","Data":"d2a2f509bf34254b9d6a0bbcffb462f040aee876e3c9064327e926b6e4bb2682"} Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.840813 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5886ef03-0905-4c4c-89cf-7de7804f8e5a","Type":"ContainerStarted","Data":"5d94523ead590c984da7464e5f96c0ee48663c1582ff2220ca48e7eba9bfa88b"} Jan 28 17:04:05 crc kubenswrapper[4877]: I0128 17:04:05.873084 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.8730612239999997 podStartE2EDuration="2.873061224s" podCreationTimestamp="2026-01-28 17:04:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:04:05.853686942 +0000 UTC m=+1749.412013870" watchObservedRunningTime="2026-01-28 17:04:05.873061224 +0000 UTC m=+1749.431388122" Jan 28 17:04:06 crc kubenswrapper[4877]: I0128 17:04:06.119610 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 28 17:04:06 crc 
kubenswrapper[4877]: I0128 17:04:06.857828 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5886ef03-0905-4c4c-89cf-7de7804f8e5a","Type":"ContainerStarted","Data":"c3b055a7054faeb73cdfbfab890a5b3cf062179dcd2635991b2ffa97c535f6b7"} Jan 28 17:04:06 crc kubenswrapper[4877]: I0128 17:04:06.858148 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5886ef03-0905-4c4c-89cf-7de7804f8e5a","Type":"ContainerStarted","Data":"2203cc83e2964ab1aa11525d5c20cae3b5e49a9c84e8069d807b07a1456f5448"} Jan 28 17:04:07 crc kubenswrapper[4877]: W0128 17:04:07.658613 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8be733b9_ce02_4f46_a417_ce3d66f58aab.slice/crio-8e808207def126e340c4b3b19c421931bf43ea3890bc0a383c62cf8c721b4fea.scope WatchSource:0}: Error finding container 8e808207def126e340c4b3b19c421931bf43ea3890bc0a383c62cf8c721b4fea: Status 404 returned error can't find the container with id 8e808207def126e340c4b3b19c421931bf43ea3890bc0a383c62cf8c721b4fea Jan 28 17:04:07 crc kubenswrapper[4877]: E0128 17:04:07.825842 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod04e4e30f_0823_4599_b3c3_b71a8630547f.slice/crio-485ac4ec342e90aa666f546807af9256ea9d496778c138bbd9f263e9a9c6cb50.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod04e4e30f_0823_4599_b3c3_b71a8630547f.slice/crio-conmon-485ac4ec342e90aa666f546807af9256ea9d496778c138bbd9f263e9a9c6cb50.scope\": RecentStats: unable to find data in memory cache]" Jan 28 17:04:07 crc kubenswrapper[4877]: E0128 17:04:07.827207 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d5a5ba0_fa85_4ec4_ac7f_49a60528bde5.slice/crio-1ea38fc69c7b481dec51d59ba0201d5e7212d5abdeda5f26a0fea425e607ef08\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3fa9f20a_6b0c_46f8_8bec_776e68ee5fb2.slice/crio-ace3c78a7c74d3425e4c0e417e939d1e77b6cabf70413aac222a8e888ca49cda.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77ef8d5f_555c_4e83_ad96_9d63b95c3684.slice/crio-conmon-d46653cc79e460941ecff85013bf4237dc6f1962f13b3ce9bacdb467b2f550da.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4954890_1a90_4904_a7d8_f286d1b56745.slice/crio-05cf205cb7271560cfdd0cac3ffd18218092b8634d3059295d86db5e3ec969aa.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode5b7f132_af69_43a8_8771_c039c8039a35.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77ef8d5f_555c_4e83_ad96_9d63b95c3684.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8be733b9_ce02_4f46_a417_ce3d66f58aab.slice/crio-327b181becbe2a488a11c6dbf2cb97ca7e5c9e5fd60c1e3ee4bfffebcb985f14.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode5b7f132_af69_43a8_8771_c039c8039a35.slice/crio-2486e9d8170a0b6dafccce734d1b48cac7c3dac1483266b420dcd9f01fb176b8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode5b7f132_af69_43a8_8771_c039c8039a35.slice/crio-conmon-97f20036790abdca2e5eceae7ba75fd06efad3ef727f8220c097ad21982b0b86.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4954890_1a90_4904_a7d8_f286d1b56745.slice/crio-conmon-05cf205cb7271560cfdd0cac3ffd18218092b8634d3059295d86db5e3ec969aa.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4954890_1a90_4904_a7d8_f286d1b56745.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77ef8d5f_555c_4e83_ad96_9d63b95c3684.slice/crio-d46653cc79e460941ecff85013bf4237dc6f1962f13b3ce9bacdb467b2f550da.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8be733b9_ce02_4f46_a417_ce3d66f58aab.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d5a5ba0_fa85_4ec4_ac7f_49a60528bde5.slice/crio-bfe39903a2895869c905d25d49dc367f979b5189fd0b55c77792cb7296f78e17.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4954890_1a90_4904_a7d8_f286d1b56745.slice/crio-b89afaf5b7c8b1a6fa9758a968da016004140cf553977a5476febe7b0fccfdc4\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod04e4e30f_0823_4599_b3c3_b71a8630547f.slice/crio-485ac4ec342e90aa666f546807af9256ea9d496778c138bbd9f263e9a9c6cb50.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3fa9f20a_6b0c_46f8_8bec_776e68ee5fb2.slice/crio-aa5c1ec95591ff8bb5b017cb55e428044bd0b275db37e7c1314e16eba90d2306\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode5b7f132_af69_43a8_8771_c039c8039a35.slice/crio-97f20036790abdca2e5eceae7ba75fd06efad3ef727f8220c097ad21982b0b86.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3fa9f20a_6b0c_46f8_8bec_776e68ee5fb2.slice/crio-conmon-ace3c78a7c74d3425e4c0e417e939d1e77b6cabf70413aac222a8e888ca49cda.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77ef8d5f_555c_4e83_ad96_9d63b95c3684.slice/crio-da62c083db84e6a1f34a1aefd714ad86e2f97f928942dcc9b49e2d1a8c014c04.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod04e4e30f_0823_4599_b3c3_b71a8630547f.slice/crio-conmon-485ac4ec342e90aa666f546807af9256ea9d496778c138bbd9f263e9a9c6cb50.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8be733b9_ce02_4f46_a417_ce3d66f58aab.slice/crio-conmon-327b181becbe2a488a11c6dbf2cb97ca7e5c9e5fd60c1e3ee4bfffebcb985f14.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77ef8d5f_555c_4e83_ad96_9d63b95c3684.slice/crio-conmon-da62c083db84e6a1f34a1aefd714ad86e2f97f928942dcc9b49e2d1a8c014c04.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d5a5ba0_fa85_4ec4_ac7f_49a60528bde5.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77ef8d5f_555c_4e83_ad96_9d63b95c3684.slice/crio-b4d642541c8fe9bde3ec2db698656a6b187be6592896ba4cfff0d3c3b7b29e30\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d5a5ba0_fa85_4ec4_ac7f_49a60528bde5.slice/crio-conmon-bfe39903a2895869c905d25d49dc367f979b5189fd0b55c77792cb7296f78e17.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3fa9f20a_6b0c_46f8_8bec_776e68ee5fb2.slice\": RecentStats: unable to find data in memory cache]" Jan 28 17:04:07 crc kubenswrapper[4877]: I0128 17:04:07.962251 4877 generic.go:334] "Generic (PLEG): container finished" podID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerID="485ac4ec342e90aa666f546807af9256ea9d496778c138bbd9f263e9a9c6cb50" exitCode=137 Jan 28 17:04:07 crc kubenswrapper[4877]: I0128 17:04:07.968775 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"04e4e30f-0823-4599-b3c3-b71a8630547f","Type":"ContainerDied","Data":"485ac4ec342e90aa666f546807af9256ea9d496778c138bbd9f263e9a9c6cb50"} Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.214552 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.215178 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.363564 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.417714 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=4.41769504 podStartE2EDuration="4.41769504s" podCreationTimestamp="2026-01-28 17:04:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:04:06.890783218 +0000 UTC m=+1750.449110106" watchObservedRunningTime="2026-01-28 17:04:08.41769504 +0000 UTC m=+1751.976021928" Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.447339 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bcnzd\" (UniqueName: \"kubernetes.io/projected/04e4e30f-0823-4599-b3c3-b71a8630547f-kube-api-access-bcnzd\") pod \"04e4e30f-0823-4599-b3c3-b71a8630547f\" (UID: \"04e4e30f-0823-4599-b3c3-b71a8630547f\") " Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.447654 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-config-data\") pod \"04e4e30f-0823-4599-b3c3-b71a8630547f\" (UID: \"04e4e30f-0823-4599-b3c3-b71a8630547f\") " Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.447931 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-scripts\") pod \"04e4e30f-0823-4599-b3c3-b71a8630547f\" (UID: \"04e4e30f-0823-4599-b3c3-b71a8630547f\") " Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.447968 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-combined-ca-bundle\") pod \"04e4e30f-0823-4599-b3c3-b71a8630547f\" (UID: \"04e4e30f-0823-4599-b3c3-b71a8630547f\") " Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.457299 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04e4e30f-0823-4599-b3c3-b71a8630547f-kube-api-access-bcnzd" (OuterVolumeSpecName: "kube-api-access-bcnzd") pod "04e4e30f-0823-4599-b3c3-b71a8630547f" (UID: "04e4e30f-0823-4599-b3c3-b71a8630547f"). InnerVolumeSpecName "kube-api-access-bcnzd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.473828 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-scripts" (OuterVolumeSpecName: "scripts") pod "04e4e30f-0823-4599-b3c3-b71a8630547f" (UID: "04e4e30f-0823-4599-b3c3-b71a8630547f"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.550614 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.550652 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bcnzd\" (UniqueName: \"kubernetes.io/projected/04e4e30f-0823-4599-b3c3-b71a8630547f-kube-api-access-bcnzd\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.654744 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "04e4e30f-0823-4599-b3c3-b71a8630547f" (UID: "04e4e30f-0823-4599-b3c3-b71a8630547f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.665937 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.740119 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-config-data" (OuterVolumeSpecName: "config-data") pod "04e4e30f-0823-4599-b3c3-b71a8630547f" (UID: "04e4e30f-0823-4599-b3c3-b71a8630547f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.787806 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04e4e30f-0823-4599-b3c3-b71a8630547f-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.983602 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.983751 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"04e4e30f-0823-4599-b3c3-b71a8630547f","Type":"ContainerDied","Data":"6fedd884eba7d7495b298097d774d64057bc01b05585ce4f3a7fc8446abacab5"} Jan 28 17:04:08 crc kubenswrapper[4877]: I0128 17:04:08.984943 4877 scope.go:117] "RemoveContainer" containerID="485ac4ec342e90aa666f546807af9256ea9d496778c138bbd9f263e9a9c6cb50" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.023076 4877 scope.go:117] "RemoveContainer" containerID="f30427682927d6697d795ec003cd86c1773d146d41690818787e684314926fef" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.063702 4877 scope.go:117] "RemoveContainer" containerID="260d70b88dfb5bc985a90b895c2fad459081454e9c8300e287c7fb1511a4c6bc" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.097825 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.098052 4877 scope.go:117] "RemoveContainer" containerID="c253ce01bf3aa86a94d013e3a7fac2725ea4e3bd96ed54d591dc9dde378f6012" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.139842 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.155012 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Jan 28 17:04:09 crc kubenswrapper[4877]: E0128 17:04:09.155833 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerName="aodh-api" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.155852 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerName="aodh-api" Jan 28 17:04:09 crc kubenswrapper[4877]: E0128 17:04:09.155874 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerName="aodh-notifier" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.155880 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerName="aodh-notifier" Jan 28 17:04:09 crc kubenswrapper[4877]: E0128 17:04:09.155899 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerName="aodh-listener" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.155906 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerName="aodh-listener" Jan 28 17:04:09 crc kubenswrapper[4877]: E0128 17:04:09.155965 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerName="aodh-evaluator" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.155974 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerName="aodh-evaluator" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.156544 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerName="aodh-evaluator" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.156567 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerName="aodh-notifier" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.156589 4877 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerName="aodh-api" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.156645 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" containerName="aodh-listener" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.159996 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.167915 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-nxphm" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.168248 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.168387 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.168745 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.169274 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.179640 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.254298 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.324500 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjtq5\" (UniqueName: \"kubernetes.io/projected/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-kube-api-access-mjtq5\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.324600 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-scripts\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.324625 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-internal-tls-certs\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.324714 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.324743 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-config-data\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.324764 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-public-tls-certs\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.348115 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04e4e30f-0823-4599-b3c3-b71a8630547f" path="/var/lib/kubelet/pods/04e4e30f-0823-4599-b3c3-b71a8630547f/volumes" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.427353 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjtq5\" (UniqueName: \"kubernetes.io/projected/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-kube-api-access-mjtq5\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.427740 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-scripts\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.427767 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-internal-tls-certs\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.427817 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.427890 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-config-data\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.427931 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-public-tls-certs\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.431733 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-scripts\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.432534 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-internal-tls-certs\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.433418 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-config-data\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc 
kubenswrapper[4877]: I0128 17:04:09.434064 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.437700 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-public-tls-certs\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.480974 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjtq5\" (UniqueName: \"kubernetes.io/projected/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-kube-api-access-mjtq5\") pod \"aodh-0\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " pod="openstack/aodh-0" Jan 28 17:04:09 crc kubenswrapper[4877]: I0128 17:04:09.494893 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 28 17:04:10 crc kubenswrapper[4877]: I0128 17:04:10.160179 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 28 17:04:10 crc kubenswrapper[4877]: I0128 17:04:10.331270 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:04:10 crc kubenswrapper[4877]: E0128 17:04:10.331830 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:04:11 crc kubenswrapper[4877]: I0128 17:04:11.016009 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a","Type":"ContainerStarted","Data":"1f03e3acfe26945d0e816f9f9b238eff89cc3934354280361cb801a83a558b04"} Jan 28 17:04:12 crc kubenswrapper[4877]: I0128 17:04:12.031196 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a","Type":"ContainerStarted","Data":"24d25f7ff80e39c21a6b3a0ee610cb0ad7d33b64aa1c8f12399598f305447f71"} Jan 28 17:04:12 crc kubenswrapper[4877]: I0128 17:04:12.031747 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a","Type":"ContainerStarted","Data":"cd874473116d0bd1dc0097fc84f4daca1a60090731d237396af4375863469715"} Jan 28 17:04:13 crc kubenswrapper[4877]: I0128 17:04:13.052008 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a","Type":"ContainerStarted","Data":"1899d70b6e6172c561f0009e9985ce54d10522df458f604136ac84297362d6d6"} Jan 28 17:04:13 crc kubenswrapper[4877]: I0128 17:04:13.214384 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 28 17:04:13 crc kubenswrapper[4877]: I0128 17:04:13.215025 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 28 17:04:14 crc kubenswrapper[4877]: I0128 
17:04:14.066563 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a","Type":"ContainerStarted","Data":"548238c267d07348d74459eda816ce0054c894cdd1e34249ff66a7e0cc8ed27b"} Jan 28 17:04:14 crc kubenswrapper[4877]: I0128 17:04:14.095270 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.259829803 podStartE2EDuration="5.095245845s" podCreationTimestamp="2026-01-28 17:04:09 +0000 UTC" firstStartedPulling="2026-01-28 17:04:10.174643425 +0000 UTC m=+1753.732970343" lastFinishedPulling="2026-01-28 17:04:13.010059497 +0000 UTC m=+1756.568386385" observedRunningTime="2026-01-28 17:04:14.08799225 +0000 UTC m=+1757.646319138" watchObservedRunningTime="2026-01-28 17:04:14.095245845 +0000 UTC m=+1757.653572743" Jan 28 17:04:14 crc kubenswrapper[4877]: I0128 17:04:14.189108 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 28 17:04:14 crc kubenswrapper[4877]: I0128 17:04:14.242801 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="617dd54a-0eba-4cb2-8108-11abfdc50ce9" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.250:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 17:04:14 crc kubenswrapper[4877]: I0128 17:04:14.242890 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="617dd54a-0eba-4cb2-8108-11abfdc50ce9" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.250:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 17:04:14 crc kubenswrapper[4877]: I0128 17:04:14.255732 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 28 17:04:14 crc kubenswrapper[4877]: I0128 17:04:14.306435 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 28 17:04:15 crc kubenswrapper[4877]: I0128 17:04:15.107459 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 28 17:04:15 crc kubenswrapper[4877]: I0128 17:04:15.282121 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 28 17:04:15 crc kubenswrapper[4877]: I0128 17:04:15.282177 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 28 17:04:16 crc kubenswrapper[4877]: I0128 17:04:16.364771 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5886ef03-0905-4c4c-89cf-7de7804f8e5a" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.252:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 17:04:16 crc kubenswrapper[4877]: I0128 17:04:16.365560 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5886ef03-0905-4c4c-89cf-7de7804f8e5a" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.252:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 17:04:19 crc kubenswrapper[4877]: I0128 17:04:19.488818 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 17:04:19 crc kubenswrapper[4877]: I0128 17:04:19.489553 
4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="08b23417-7d7c-4d16-85e0-4f06c5e9b314" containerName="kube-state-metrics" containerID="cri-o://cb9005be24322b55c269621c815b4ed453924a096386bfd30bb713d6ac3bad73" gracePeriod=30 Jan 28 17:04:19 crc kubenswrapper[4877]: I0128 17:04:19.639945 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 28 17:04:19 crc kubenswrapper[4877]: I0128 17:04:19.640207 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mysqld-exporter-0" podUID="a40862f9-9799-4bd4-9e3f-9d528cf5f50e" containerName="mysqld-exporter" containerID="cri-o://94f9545ebcef02a0b88178061a93c8b89f464c7ce525b689263733d646a36d7b" gracePeriod=30 Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.138208 4877 generic.go:334] "Generic (PLEG): container finished" podID="08b23417-7d7c-4d16-85e0-4f06c5e9b314" containerID="cb9005be24322b55c269621c815b4ed453924a096386bfd30bb713d6ac3bad73" exitCode=2 Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.138576 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"08b23417-7d7c-4d16-85e0-4f06c5e9b314","Type":"ContainerDied","Data":"cb9005be24322b55c269621c815b4ed453924a096386bfd30bb713d6ac3bad73"} Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.138610 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"08b23417-7d7c-4d16-85e0-4f06c5e9b314","Type":"ContainerDied","Data":"a6dcb1c1e2325f9da6e7f731e5e8640b86799599cdb46640a3b9effa5b4e72bd"} Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.138692 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a6dcb1c1e2325f9da6e7f731e5e8640b86799599cdb46640a3b9effa5b4e72bd" Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.161804 4877 generic.go:334] "Generic (PLEG): container finished" podID="a40862f9-9799-4bd4-9e3f-9d528cf5f50e" containerID="94f9545ebcef02a0b88178061a93c8b89f464c7ce525b689263733d646a36d7b" exitCode=2 Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.161860 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"a40862f9-9799-4bd4-9e3f-9d528cf5f50e","Type":"ContainerDied","Data":"94f9545ebcef02a0b88178061a93c8b89f464c7ce525b689263733d646a36d7b"} Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.174982 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.318754 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.364089 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-48vgj\" (UniqueName: \"kubernetes.io/projected/08b23417-7d7c-4d16-85e0-4f06c5e9b314-kube-api-access-48vgj\") pod \"08b23417-7d7c-4d16-85e0-4f06c5e9b314\" (UID: \"08b23417-7d7c-4d16-85e0-4f06c5e9b314\") " Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.374428 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08b23417-7d7c-4d16-85e0-4f06c5e9b314-kube-api-access-48vgj" (OuterVolumeSpecName: "kube-api-access-48vgj") pod "08b23417-7d7c-4d16-85e0-4f06c5e9b314" (UID: "08b23417-7d7c-4d16-85e0-4f06c5e9b314"). 
InnerVolumeSpecName "kube-api-access-48vgj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.467179 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-combined-ca-bundle\") pod \"a40862f9-9799-4bd4-9e3f-9d528cf5f50e\" (UID: \"a40862f9-9799-4bd4-9e3f-9d528cf5f50e\") " Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.467264 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85h2c\" (UniqueName: \"kubernetes.io/projected/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-kube-api-access-85h2c\") pod \"a40862f9-9799-4bd4-9e3f-9d528cf5f50e\" (UID: \"a40862f9-9799-4bd4-9e3f-9d528cf5f50e\") " Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.467359 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-config-data\") pod \"a40862f9-9799-4bd4-9e3f-9d528cf5f50e\" (UID: \"a40862f9-9799-4bd4-9e3f-9d528cf5f50e\") " Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.468185 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-48vgj\" (UniqueName: \"kubernetes.io/projected/08b23417-7d7c-4d16-85e0-4f06c5e9b314-kube-api-access-48vgj\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.480328 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-kube-api-access-85h2c" (OuterVolumeSpecName: "kube-api-access-85h2c") pod "a40862f9-9799-4bd4-9e3f-9d528cf5f50e" (UID: "a40862f9-9799-4bd4-9e3f-9d528cf5f50e"). InnerVolumeSpecName "kube-api-access-85h2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.514314 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a40862f9-9799-4bd4-9e3f-9d528cf5f50e" (UID: "a40862f9-9799-4bd4-9e3f-9d528cf5f50e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.549544 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-config-data" (OuterVolumeSpecName: "config-data") pod "a40862f9-9799-4bd4-9e3f-9d528cf5f50e" (UID: "a40862f9-9799-4bd4-9e3f-9d528cf5f50e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.575390 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.575499 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85h2c\" (UniqueName: \"kubernetes.io/projected/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-kube-api-access-85h2c\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:20 crc kubenswrapper[4877]: I0128 17:04:20.575517 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a40862f9-9799-4bd4-9e3f-9d528cf5f50e-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.181505 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"a40862f9-9799-4bd4-9e3f-9d528cf5f50e","Type":"ContainerDied","Data":"9f376dfe1c648c1fe27dabd49a4837f468b13a2ee0f4605d055378af07fdd5bb"} Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.181549 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.181889 4877 scope.go:117] "RemoveContainer" containerID="94f9545ebcef02a0b88178061a93c8b89f464c7ce525b689263733d646a36d7b" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.181562 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.235071 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.249267 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.264896 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.289435 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.300923 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 17:04:21 crc kubenswrapper[4877]: E0128 17:04:21.301713 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08b23417-7d7c-4d16-85e0-4f06c5e9b314" containerName="kube-state-metrics" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.301816 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="08b23417-7d7c-4d16-85e0-4f06c5e9b314" containerName="kube-state-metrics" Jan 28 17:04:21 crc kubenswrapper[4877]: E0128 17:04:21.301872 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a40862f9-9799-4bd4-9e3f-9d528cf5f50e" containerName="mysqld-exporter" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.301927 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="a40862f9-9799-4bd4-9e3f-9d528cf5f50e" containerName="mysqld-exporter" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.302187 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="a40862f9-9799-4bd4-9e3f-9d528cf5f50e" containerName="mysqld-exporter" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.302264 4877 
memory_manager.go:354] "RemoveStaleState removing state" podUID="08b23417-7d7c-4d16-85e0-4f06c5e9b314" containerName="kube-state-metrics" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.303236 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.306271 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.309803 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.313072 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.327291 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.330744 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.331676 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:04:21 crc kubenswrapper[4877]: E0128 17:04:21.332024 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.334087 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.334192 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-mysqld-exporter-svc" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.356941 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08b23417-7d7c-4d16-85e0-4f06c5e9b314" path="/var/lib/kubelet/pods/08b23417-7d7c-4d16-85e0-4f06c5e9b314/volumes" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.357551 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a40862f9-9799-4bd4-9e3f-9d528cf5f50e" path="/var/lib/kubelet/pods/a40862f9-9799-4bd4-9e3f-9d528cf5f50e/volumes" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.358125 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.497980 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb581a5d-521a-4bed-96b5-5c6ae31eed9f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"fb581a5d-521a-4bed-96b5-5c6ae31eed9f\") " pod="openstack/kube-state-metrics-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.498057 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/fb581a5d-521a-4bed-96b5-5c6ae31eed9f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" 
(UID: \"fb581a5d-521a-4bed-96b5-5c6ae31eed9f\") " pod="openstack/kube-state-metrics-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.498136 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e3b8959-6e09-4179-aa74-7fc6f43d85d0-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"0e3b8959-6e09-4179-aa74-7fc6f43d85d0\") " pod="openstack/mysqld-exporter-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.498201 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e3b8959-6e09-4179-aa74-7fc6f43d85d0-config-data\") pod \"mysqld-exporter-0\" (UID: \"0e3b8959-6e09-4179-aa74-7fc6f43d85d0\") " pod="openstack/mysqld-exporter-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.498233 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e3b8959-6e09-4179-aa74-7fc6f43d85d0-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"0e3b8959-6e09-4179-aa74-7fc6f43d85d0\") " pod="openstack/mysqld-exporter-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.498259 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-988lk\" (UniqueName: \"kubernetes.io/projected/fb581a5d-521a-4bed-96b5-5c6ae31eed9f-kube-api-access-988lk\") pod \"kube-state-metrics-0\" (UID: \"fb581a5d-521a-4bed-96b5-5c6ae31eed9f\") " pod="openstack/kube-state-metrics-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.498288 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwcxx\" (UniqueName: \"kubernetes.io/projected/0e3b8959-6e09-4179-aa74-7fc6f43d85d0-kube-api-access-dwcxx\") pod \"mysqld-exporter-0\" (UID: \"0e3b8959-6e09-4179-aa74-7fc6f43d85d0\") " pod="openstack/mysqld-exporter-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.498328 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb581a5d-521a-4bed-96b5-5c6ae31eed9f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"fb581a5d-521a-4bed-96b5-5c6ae31eed9f\") " pod="openstack/kube-state-metrics-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.600768 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb581a5d-521a-4bed-96b5-5c6ae31eed9f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"fb581a5d-521a-4bed-96b5-5c6ae31eed9f\") " pod="openstack/kube-state-metrics-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.600871 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/fb581a5d-521a-4bed-96b5-5c6ae31eed9f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"fb581a5d-521a-4bed-96b5-5c6ae31eed9f\") " pod="openstack/kube-state-metrics-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.600955 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e3b8959-6e09-4179-aa74-7fc6f43d85d0-mysqld-exporter-tls-certs\") pod 
\"mysqld-exporter-0\" (UID: \"0e3b8959-6e09-4179-aa74-7fc6f43d85d0\") " pod="openstack/mysqld-exporter-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.601032 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e3b8959-6e09-4179-aa74-7fc6f43d85d0-config-data\") pod \"mysqld-exporter-0\" (UID: \"0e3b8959-6e09-4179-aa74-7fc6f43d85d0\") " pod="openstack/mysqld-exporter-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.601089 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e3b8959-6e09-4179-aa74-7fc6f43d85d0-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"0e3b8959-6e09-4179-aa74-7fc6f43d85d0\") " pod="openstack/mysqld-exporter-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.601130 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-988lk\" (UniqueName: \"kubernetes.io/projected/fb581a5d-521a-4bed-96b5-5c6ae31eed9f-kube-api-access-988lk\") pod \"kube-state-metrics-0\" (UID: \"fb581a5d-521a-4bed-96b5-5c6ae31eed9f\") " pod="openstack/kube-state-metrics-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.601167 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwcxx\" (UniqueName: \"kubernetes.io/projected/0e3b8959-6e09-4179-aa74-7fc6f43d85d0-kube-api-access-dwcxx\") pod \"mysqld-exporter-0\" (UID: \"0e3b8959-6e09-4179-aa74-7fc6f43d85d0\") " pod="openstack/mysqld-exporter-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.601204 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb581a5d-521a-4bed-96b5-5c6ae31eed9f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"fb581a5d-521a-4bed-96b5-5c6ae31eed9f\") " pod="openstack/kube-state-metrics-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.606933 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e3b8959-6e09-4179-aa74-7fc6f43d85d0-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"0e3b8959-6e09-4179-aa74-7fc6f43d85d0\") " pod="openstack/mysqld-exporter-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.607065 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb581a5d-521a-4bed-96b5-5c6ae31eed9f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"fb581a5d-521a-4bed-96b5-5c6ae31eed9f\") " pod="openstack/kube-state-metrics-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.610210 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb581a5d-521a-4bed-96b5-5c6ae31eed9f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"fb581a5d-521a-4bed-96b5-5c6ae31eed9f\") " pod="openstack/kube-state-metrics-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.610702 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e3b8959-6e09-4179-aa74-7fc6f43d85d0-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"0e3b8959-6e09-4179-aa74-7fc6f43d85d0\") " pod="openstack/mysqld-exporter-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.610711 4877 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e3b8959-6e09-4179-aa74-7fc6f43d85d0-config-data\") pod \"mysqld-exporter-0\" (UID: \"0e3b8959-6e09-4179-aa74-7fc6f43d85d0\") " pod="openstack/mysqld-exporter-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.611196 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/fb581a5d-521a-4bed-96b5-5c6ae31eed9f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"fb581a5d-521a-4bed-96b5-5c6ae31eed9f\") " pod="openstack/kube-state-metrics-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.625253 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwcxx\" (UniqueName: \"kubernetes.io/projected/0e3b8959-6e09-4179-aa74-7fc6f43d85d0-kube-api-access-dwcxx\") pod \"mysqld-exporter-0\" (UID: \"0e3b8959-6e09-4179-aa74-7fc6f43d85d0\") " pod="openstack/mysqld-exporter-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.630278 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-988lk\" (UniqueName: \"kubernetes.io/projected/fb581a5d-521a-4bed-96b5-5c6ae31eed9f-kube-api-access-988lk\") pod \"kube-state-metrics-0\" (UID: \"fb581a5d-521a-4bed-96b5-5c6ae31eed9f\") " pod="openstack/kube-state-metrics-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.631393 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.654684 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.942281 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.954643 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f6df48e5-8631-4576-9cff-5795a458241e" containerName="ceilometer-central-agent" containerID="cri-o://2eb3ca4ed1203afb503d2152f95bef8dd7210bc0f37812a80f2b8e348d35caaf" gracePeriod=30 Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.955200 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f6df48e5-8631-4576-9cff-5795a458241e" containerName="proxy-httpd" containerID="cri-o://83e1c715e67819568d645f006c3e8854fb63ad0bf020027308b83e8d0f642115" gracePeriod=30 Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.955263 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f6df48e5-8631-4576-9cff-5795a458241e" containerName="sg-core" containerID="cri-o://4075c3b9fe27021d713ab9e343ce0ad04f76203e212bb5877eb13abd2e61fcaf" gracePeriod=30 Jan 28 17:04:21 crc kubenswrapper[4877]: I0128 17:04:21.955300 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f6df48e5-8631-4576-9cff-5795a458241e" containerName="ceilometer-notification-agent" containerID="cri-o://bb77ca55177d07dda480721f36c0b4753971c59a418f2bc714c11113ac3d2088" gracePeriod=30 Jan 28 17:04:22 crc kubenswrapper[4877]: I0128 17:04:22.218110 4877 generic.go:334] "Generic (PLEG): container finished" podID="f6df48e5-8631-4576-9cff-5795a458241e" 
containerID="83e1c715e67819568d645f006c3e8854fb63ad0bf020027308b83e8d0f642115" exitCode=0 Jan 28 17:04:22 crc kubenswrapper[4877]: I0128 17:04:22.218457 4877 generic.go:334] "Generic (PLEG): container finished" podID="f6df48e5-8631-4576-9cff-5795a458241e" containerID="4075c3b9fe27021d713ab9e343ce0ad04f76203e212bb5877eb13abd2e61fcaf" exitCode=2 Jan 28 17:04:22 crc kubenswrapper[4877]: I0128 17:04:22.218508 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6df48e5-8631-4576-9cff-5795a458241e","Type":"ContainerDied","Data":"83e1c715e67819568d645f006c3e8854fb63ad0bf020027308b83e8d0f642115"} Jan 28 17:04:22 crc kubenswrapper[4877]: I0128 17:04:22.218545 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6df48e5-8631-4576-9cff-5795a458241e","Type":"ContainerDied","Data":"4075c3b9fe27021d713ab9e343ce0ad04f76203e212bb5877eb13abd2e61fcaf"} Jan 28 17:04:22 crc kubenswrapper[4877]: I0128 17:04:22.260438 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 17:04:22 crc kubenswrapper[4877]: I0128 17:04:22.422288 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Jan 28 17:04:22 crc kubenswrapper[4877]: W0128 17:04:22.433487 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0e3b8959_6e09_4179_aa74_7fc6f43d85d0.slice/crio-1cb713c02066e47decf019a19f1e449e853b964a2fc6db8f55f1bb0f369b09ba WatchSource:0}: Error finding container 1cb713c02066e47decf019a19f1e449e853b964a2fc6db8f55f1bb0f369b09ba: Status 404 returned error can't find the container with id 1cb713c02066e47decf019a19f1e449e853b964a2fc6db8f55f1bb0f369b09ba Jan 28 17:04:23 crc kubenswrapper[4877]: I0128 17:04:23.223357 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 28 17:04:23 crc kubenswrapper[4877]: I0128 17:04:23.229858 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 28 17:04:23 crc kubenswrapper[4877]: I0128 17:04:23.230250 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 28 17:04:23 crc kubenswrapper[4877]: I0128 17:04:23.233831 4877 generic.go:334] "Generic (PLEG): container finished" podID="f6df48e5-8631-4576-9cff-5795a458241e" containerID="2eb3ca4ed1203afb503d2152f95bef8dd7210bc0f37812a80f2b8e348d35caaf" exitCode=0 Jan 28 17:04:23 crc kubenswrapper[4877]: I0128 17:04:23.233896 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6df48e5-8631-4576-9cff-5795a458241e","Type":"ContainerDied","Data":"2eb3ca4ed1203afb503d2152f95bef8dd7210bc0f37812a80f2b8e348d35caaf"} Jan 28 17:04:23 crc kubenswrapper[4877]: I0128 17:04:23.238790 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"0e3b8959-6e09-4179-aa74-7fc6f43d85d0","Type":"ContainerStarted","Data":"1cb713c02066e47decf019a19f1e449e853b964a2fc6db8f55f1bb0f369b09ba"} Jan 28 17:04:23 crc kubenswrapper[4877]: I0128 17:04:23.242115 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"fb581a5d-521a-4bed-96b5-5c6ae31eed9f","Type":"ContainerStarted","Data":"7b7743149462578d08f41a9e2daa49c3ed71ce3e56ed2f51e84da4fe62bb6e1a"} Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.005970 4877 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.099352 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2464c30-bc7c-4481-8ccd-ab866e7fc678-config-data\") pod \"e2464c30-bc7c-4481-8ccd-ab866e7fc678\" (UID: \"e2464c30-bc7c-4481-8ccd-ab866e7fc678\") " Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.099567 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2464c30-bc7c-4481-8ccd-ab866e7fc678-combined-ca-bundle\") pod \"e2464c30-bc7c-4481-8ccd-ab866e7fc678\" (UID: \"e2464c30-bc7c-4481-8ccd-ab866e7fc678\") " Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.130973 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2464c30-bc7c-4481-8ccd-ab866e7fc678-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e2464c30-bc7c-4481-8ccd-ab866e7fc678" (UID: "e2464c30-bc7c-4481-8ccd-ab866e7fc678"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.131103 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2464c30-bc7c-4481-8ccd-ab866e7fc678-config-data" (OuterVolumeSpecName: "config-data") pod "e2464c30-bc7c-4481-8ccd-ab866e7fc678" (UID: "e2464c30-bc7c-4481-8ccd-ab866e7fc678"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.201413 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvz5s\" (UniqueName: \"kubernetes.io/projected/e2464c30-bc7c-4481-8ccd-ab866e7fc678-kube-api-access-mvz5s\") pod \"e2464c30-bc7c-4481-8ccd-ab866e7fc678\" (UID: \"e2464c30-bc7c-4481-8ccd-ab866e7fc678\") " Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.201900 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2464c30-bc7c-4481-8ccd-ab866e7fc678-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.201937 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2464c30-bc7c-4481-8ccd-ab866e7fc678-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.204312 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2464c30-bc7c-4481-8ccd-ab866e7fc678-kube-api-access-mvz5s" (OuterVolumeSpecName: "kube-api-access-mvz5s") pod "e2464c30-bc7c-4481-8ccd-ab866e7fc678" (UID: "e2464c30-bc7c-4481-8ccd-ab866e7fc678"). InnerVolumeSpecName "kube-api-access-mvz5s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.255020 4877 generic.go:334] "Generic (PLEG): container finished" podID="e2464c30-bc7c-4481-8ccd-ab866e7fc678" containerID="cb5ee04a7fb97513308413171cb5b6ea44e962a06b2db140ce3c0c7d8a11e688" exitCode=137 Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.255113 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e2464c30-bc7c-4481-8ccd-ab866e7fc678","Type":"ContainerDied","Data":"cb5ee04a7fb97513308413171cb5b6ea44e962a06b2db140ce3c0c7d8a11e688"} Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.255134 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.255153 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e2464c30-bc7c-4481-8ccd-ab866e7fc678","Type":"ContainerDied","Data":"09cf5f00613e7ec89eedc409c1d90a49fcdc5ad4926736de80b600ad9a4d016b"} Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.255175 4877 scope.go:117] "RemoveContainer" containerID="cb5ee04a7fb97513308413171cb5b6ea44e962a06b2db140ce3c0c7d8a11e688" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.257903 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"0e3b8959-6e09-4179-aa74-7fc6f43d85d0","Type":"ContainerStarted","Data":"dbb4409e0b887695de21673bc17f4d63fe048433254c9b040201db1ba35efabe"} Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.265094 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"fb581a5d-521a-4bed-96b5-5c6ae31eed9f","Type":"ContainerStarted","Data":"3942b42d1a702905dd440392f2f03a38448a9e518fd2976b0141e821dbec8c64"} Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.270720 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.304006 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvz5s\" (UniqueName: \"kubernetes.io/projected/e2464c30-bc7c-4481-8ccd-ab866e7fc678-kube-api-access-mvz5s\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.318699 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=2.796294632 podStartE2EDuration="3.318666576s" podCreationTimestamp="2026-01-28 17:04:21 +0000 UTC" firstStartedPulling="2026-01-28 17:04:22.435610716 +0000 UTC m=+1765.993937604" lastFinishedPulling="2026-01-28 17:04:22.95798266 +0000 UTC m=+1766.516309548" observedRunningTime="2026-01-28 17:04:24.278363371 +0000 UTC m=+1767.836690269" watchObservedRunningTime="2026-01-28 17:04:24.318666576 +0000 UTC m=+1767.876993464" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.324336 4877 scope.go:117] "RemoveContainer" containerID="cb5ee04a7fb97513308413171cb5b6ea44e962a06b2db140ce3c0c7d8a11e688" Jan 28 17:04:24 crc kubenswrapper[4877]: E0128 17:04:24.325438 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb5ee04a7fb97513308413171cb5b6ea44e962a06b2db140ce3c0c7d8a11e688\": container with ID starting with cb5ee04a7fb97513308413171cb5b6ea44e962a06b2db140ce3c0c7d8a11e688 not found: ID does not exist" 
containerID="cb5ee04a7fb97513308413171cb5b6ea44e962a06b2db140ce3c0c7d8a11e688" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.325541 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb5ee04a7fb97513308413171cb5b6ea44e962a06b2db140ce3c0c7d8a11e688"} err="failed to get container status \"cb5ee04a7fb97513308413171cb5b6ea44e962a06b2db140ce3c0c7d8a11e688\": rpc error: code = NotFound desc = could not find container \"cb5ee04a7fb97513308413171cb5b6ea44e962a06b2db140ce3c0c7d8a11e688\": container with ID starting with cb5ee04a7fb97513308413171cb5b6ea44e962a06b2db140ce3c0c7d8a11e688 not found: ID does not exist" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.350193 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.377053 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.413514 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 17:04:24 crc kubenswrapper[4877]: E0128 17:04:24.414158 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2464c30-bc7c-4481-8ccd-ab866e7fc678" containerName="nova-cell1-novncproxy-novncproxy" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.414183 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2464c30-bc7c-4481-8ccd-ab866e7fc678" containerName="nova-cell1-novncproxy-novncproxy" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.414512 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2464c30-bc7c-4481-8ccd-ab866e7fc678" containerName="nova-cell1-novncproxy-novncproxy" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.415508 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.421016 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.421206 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.429033 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.453540 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.461389 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.944243926 podStartE2EDuration="3.461370079s" podCreationTimestamp="2026-01-28 17:04:21 +0000 UTC" firstStartedPulling="2026-01-28 17:04:22.281670811 +0000 UTC m=+1765.839997699" lastFinishedPulling="2026-01-28 17:04:22.798796964 +0000 UTC m=+1766.357123852" observedRunningTime="2026-01-28 17:04:24.363847883 +0000 UTC m=+1767.922174781" watchObservedRunningTime="2026-01-28 17:04:24.461370079 +0000 UTC m=+1768.019696967" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.510506 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45dc70cd-3b82-4342-8194-1d1794ad1ad0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"45dc70cd-3b82-4342-8194-1d1794ad1ad0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.510613 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bjn6\" (UniqueName: \"kubernetes.io/projected/45dc70cd-3b82-4342-8194-1d1794ad1ad0-kube-api-access-6bjn6\") pod \"nova-cell1-novncproxy-0\" (UID: \"45dc70cd-3b82-4342-8194-1d1794ad1ad0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.510779 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/45dc70cd-3b82-4342-8194-1d1794ad1ad0-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"45dc70cd-3b82-4342-8194-1d1794ad1ad0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.510823 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/45dc70cd-3b82-4342-8194-1d1794ad1ad0-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"45dc70cd-3b82-4342-8194-1d1794ad1ad0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.510920 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45dc70cd-3b82-4342-8194-1d1794ad1ad0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"45dc70cd-3b82-4342-8194-1d1794ad1ad0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.612764 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/45dc70cd-3b82-4342-8194-1d1794ad1ad0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"45dc70cd-3b82-4342-8194-1d1794ad1ad0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.612930 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bjn6\" (UniqueName: \"kubernetes.io/projected/45dc70cd-3b82-4342-8194-1d1794ad1ad0-kube-api-access-6bjn6\") pod \"nova-cell1-novncproxy-0\" (UID: \"45dc70cd-3b82-4342-8194-1d1794ad1ad0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.613060 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/45dc70cd-3b82-4342-8194-1d1794ad1ad0-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"45dc70cd-3b82-4342-8194-1d1794ad1ad0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.613100 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/45dc70cd-3b82-4342-8194-1d1794ad1ad0-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"45dc70cd-3b82-4342-8194-1d1794ad1ad0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.613191 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45dc70cd-3b82-4342-8194-1d1794ad1ad0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"45dc70cd-3b82-4342-8194-1d1794ad1ad0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.617333 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/45dc70cd-3b82-4342-8194-1d1794ad1ad0-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"45dc70cd-3b82-4342-8194-1d1794ad1ad0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.617519 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45dc70cd-3b82-4342-8194-1d1794ad1ad0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"45dc70cd-3b82-4342-8194-1d1794ad1ad0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.618096 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45dc70cd-3b82-4342-8194-1d1794ad1ad0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"45dc70cd-3b82-4342-8194-1d1794ad1ad0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.619972 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/45dc70cd-3b82-4342-8194-1d1794ad1ad0-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"45dc70cd-3b82-4342-8194-1d1794ad1ad0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.635951 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bjn6\" (UniqueName: \"kubernetes.io/projected/45dc70cd-3b82-4342-8194-1d1794ad1ad0-kube-api-access-6bjn6\") 
pod \"nova-cell1-novncproxy-0\" (UID: \"45dc70cd-3b82-4342-8194-1d1794ad1ad0\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:24 crc kubenswrapper[4877]: I0128 17:04:24.750712 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:25 crc kubenswrapper[4877]: I0128 17:04:25.278204 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 28 17:04:25 crc kubenswrapper[4877]: I0128 17:04:25.287180 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 28 17:04:25 crc kubenswrapper[4877]: I0128 17:04:25.287682 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 28 17:04:25 crc kubenswrapper[4877]: I0128 17:04:25.297714 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 28 17:04:25 crc kubenswrapper[4877]: I0128 17:04:25.299256 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 28 17:04:25 crc kubenswrapper[4877]: I0128 17:04:25.310570 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 17:04:25 crc kubenswrapper[4877]: I0128 17:04:25.349771 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2464c30-bc7c-4481-8ccd-ab866e7fc678" path="/var/lib/kubelet/pods/e2464c30-bc7c-4481-8ccd-ab866e7fc678/volumes" Jan 28 17:04:26 crc kubenswrapper[4877]: I0128 17:04:26.290786 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"45dc70cd-3b82-4342-8194-1d1794ad1ad0","Type":"ContainerStarted","Data":"adcf6e22ba47dc0ad9f388da8b1b74eee9f7da07651017bdac92e6ce002f6249"} Jan 28 17:04:26 crc kubenswrapper[4877]: I0128 17:04:26.291220 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 28 17:04:26 crc kubenswrapper[4877]: I0128 17:04:26.291277 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"45dc70cd-3b82-4342-8194-1d1794ad1ad0","Type":"ContainerStarted","Data":"7bcc63750335a3d7047e5961661d05b3a92666a48a4a0be2c8d16444512d11aa"} Jan 28 17:04:26 crc kubenswrapper[4877]: I0128 17:04:26.321563 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.321545292 podStartE2EDuration="2.321545292s" podCreationTimestamp="2026-01-28 17:04:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:04:26.317838993 +0000 UTC m=+1769.876165881" watchObservedRunningTime="2026-01-28 17:04:26.321545292 +0000 UTC m=+1769.879872180" Jan 28 17:04:26 crc kubenswrapper[4877]: I0128 17:04:26.538622 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 28 17:04:26 crc kubenswrapper[4877]: I0128 17:04:26.786372 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-87n8s"] Jan 28 17:04:26 crc kubenswrapper[4877]: I0128 17:04:26.788822 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:26 crc kubenswrapper[4877]: I0128 17:04:26.810544 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-87n8s"] Jan 28 17:04:26 crc kubenswrapper[4877]: I0128 17:04:26.977392 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:26 crc kubenswrapper[4877]: I0128 17:04:26.977542 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:26 crc kubenswrapper[4877]: I0128 17:04:26.977622 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-config\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:26 crc kubenswrapper[4877]: I0128 17:04:26.977812 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:26 crc kubenswrapper[4877]: I0128 17:04:26.977970 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:26 crc kubenswrapper[4877]: I0128 17:04:26.978065 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rprxh\" (UniqueName: \"kubernetes.io/projected/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-kube-api-access-rprxh\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:27 crc kubenswrapper[4877]: I0128 17:04:27.081424 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-config\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:27 crc kubenswrapper[4877]: I0128 17:04:27.082503 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-config\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:27 crc kubenswrapper[4877]: I0128 17:04:27.083394 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:27 crc kubenswrapper[4877]: I0128 17:04:27.084109 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:27 crc kubenswrapper[4877]: I0128 17:04:27.084304 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:27 crc kubenswrapper[4877]: I0128 17:04:27.085014 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:27 crc kubenswrapper[4877]: I0128 17:04:27.085167 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rprxh\" (UniqueName: \"kubernetes.io/projected/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-kube-api-access-rprxh\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:27 crc kubenswrapper[4877]: I0128 17:04:27.085849 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:27 crc kubenswrapper[4877]: I0128 17:04:27.086551 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:27 crc kubenswrapper[4877]: I0128 17:04:27.086764 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:27 crc kubenswrapper[4877]: I0128 17:04:27.087566 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:27 crc kubenswrapper[4877]: I0128 17:04:27.118172 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rprxh\" (UniqueName: 
\"kubernetes.io/projected/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-kube-api-access-rprxh\") pod \"dnsmasq-dns-79b5d74c8c-87n8s\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:27 crc kubenswrapper[4877]: I0128 17:04:27.119899 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:27 crc kubenswrapper[4877]: I0128 17:04:27.778229 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-87n8s"] Jan 28 17:04:27 crc kubenswrapper[4877]: I0128 17:04:27.967139 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.017835 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-scripts\") pod \"f6df48e5-8631-4576-9cff-5795a458241e\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.018578 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-sg-core-conf-yaml\") pod \"f6df48e5-8631-4576-9cff-5795a458241e\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.018622 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6df48e5-8631-4576-9cff-5795a458241e-run-httpd\") pod \"f6df48e5-8631-4576-9cff-5795a458241e\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.020961 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6df48e5-8631-4576-9cff-5795a458241e-log-httpd\") pod \"f6df48e5-8631-4576-9cff-5795a458241e\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.021014 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-config-data\") pod \"f6df48e5-8631-4576-9cff-5795a458241e\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.021053 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-combined-ca-bundle\") pod \"f6df48e5-8631-4576-9cff-5795a458241e\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.021091 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5fxt\" (UniqueName: \"kubernetes.io/projected/f6df48e5-8631-4576-9cff-5795a458241e-kube-api-access-g5fxt\") pod \"f6df48e5-8631-4576-9cff-5795a458241e\" (UID: \"f6df48e5-8631-4576-9cff-5795a458241e\") " Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.024672 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-scripts" (OuterVolumeSpecName: "scripts") pod "f6df48e5-8631-4576-9cff-5795a458241e" (UID: "f6df48e5-8631-4576-9cff-5795a458241e"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.025211 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6df48e5-8631-4576-9cff-5795a458241e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f6df48e5-8631-4576-9cff-5795a458241e" (UID: "f6df48e5-8631-4576-9cff-5795a458241e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.027434 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6df48e5-8631-4576-9cff-5795a458241e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f6df48e5-8631-4576-9cff-5795a458241e" (UID: "f6df48e5-8631-4576-9cff-5795a458241e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.029461 4877 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6df48e5-8631-4576-9cff-5795a458241e-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.035820 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.035888 4877 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6df48e5-8631-4576-9cff-5795a458241e-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.038218 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6df48e5-8631-4576-9cff-5795a458241e-kube-api-access-g5fxt" (OuterVolumeSpecName: "kube-api-access-g5fxt") pod "f6df48e5-8631-4576-9cff-5795a458241e" (UID: "f6df48e5-8631-4576-9cff-5795a458241e"). InnerVolumeSpecName "kube-api-access-g5fxt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.138641 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5fxt\" (UniqueName: \"kubernetes.io/projected/f6df48e5-8631-4576-9cff-5795a458241e-kube-api-access-g5fxt\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.167536 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f6df48e5-8631-4576-9cff-5795a458241e" (UID: "f6df48e5-8631-4576-9cff-5795a458241e"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.246784 4877 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.272642 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f6df48e5-8631-4576-9cff-5795a458241e" (UID: "f6df48e5-8631-4576-9cff-5795a458241e"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.346768 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" event={"ID":"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1","Type":"ContainerStarted","Data":"0b4579ff3b99fcbed0d1f86794cee9170c201767c7c903ee64e7aabc65f024ce"} Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.346820 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" event={"ID":"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1","Type":"ContainerStarted","Data":"51ad788d57f4ac4bdd2dbe60ef093822fec90714291413b224f111524db2a02c"} Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.351046 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.363340 4877 generic.go:334] "Generic (PLEG): container finished" podID="f6df48e5-8631-4576-9cff-5795a458241e" containerID="bb77ca55177d07dda480721f36c0b4753971c59a418f2bc714c11113ac3d2088" exitCode=0 Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.363441 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.363514 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6df48e5-8631-4576-9cff-5795a458241e","Type":"ContainerDied","Data":"bb77ca55177d07dda480721f36c0b4753971c59a418f2bc714c11113ac3d2088"} Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.363556 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6df48e5-8631-4576-9cff-5795a458241e","Type":"ContainerDied","Data":"d1ffcf341fe062aeadbf6f0d6b6322aa663b564bb05682fd0110736523a4f082"} Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.363579 4877 scope.go:117] "RemoveContainer" containerID="83e1c715e67819568d645f006c3e8854fb63ad0bf020027308b83e8d0f642115" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.425211 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-config-data" (OuterVolumeSpecName: "config-data") pod "f6df48e5-8631-4576-9cff-5795a458241e" (UID: "f6df48e5-8631-4576-9cff-5795a458241e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.453700 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6df48e5-8631-4576-9cff-5795a458241e-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.604121 4877 scope.go:117] "RemoveContainer" containerID="4075c3b9fe27021d713ab9e343ce0ad04f76203e212bb5877eb13abd2e61fcaf" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.636967 4877 scope.go:117] "RemoveContainer" containerID="bb77ca55177d07dda480721f36c0b4753971c59a418f2bc714c11113ac3d2088" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.683600 4877 scope.go:117] "RemoveContainer" containerID="2eb3ca4ed1203afb503d2152f95bef8dd7210bc0f37812a80f2b8e348d35caaf" Jan 28 17:04:28 crc kubenswrapper[4877]: E0128 17:04:28.692566 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf0f7ed22_d3da_47ce_b61c_53a0b7a878e1.slice/crio-conmon-0b4579ff3b99fcbed0d1f86794cee9170c201767c7c903ee64e7aabc65f024ce.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf0f7ed22_d3da_47ce_b61c_53a0b7a878e1.slice/crio-0b4579ff3b99fcbed0d1f86794cee9170c201767c7c903ee64e7aabc65f024ce.scope\": RecentStats: unable to find data in memory cache]" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.718972 4877 scope.go:117] "RemoveContainer" containerID="83e1c715e67819568d645f006c3e8854fb63ad0bf020027308b83e8d0f642115" Jan 28 17:04:28 crc kubenswrapper[4877]: E0128 17:04:28.719346 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83e1c715e67819568d645f006c3e8854fb63ad0bf020027308b83e8d0f642115\": container with ID starting with 83e1c715e67819568d645f006c3e8854fb63ad0bf020027308b83e8d0f642115 not found: ID does not exist" containerID="83e1c715e67819568d645f006c3e8854fb63ad0bf020027308b83e8d0f642115" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.719395 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83e1c715e67819568d645f006c3e8854fb63ad0bf020027308b83e8d0f642115"} err="failed to get container status \"83e1c715e67819568d645f006c3e8854fb63ad0bf020027308b83e8d0f642115\": rpc error: code = NotFound desc = could not find container \"83e1c715e67819568d645f006c3e8854fb63ad0bf020027308b83e8d0f642115\": container with ID starting with 83e1c715e67819568d645f006c3e8854fb63ad0bf020027308b83e8d0f642115 not found: ID does not exist" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.719428 4877 scope.go:117] "RemoveContainer" containerID="4075c3b9fe27021d713ab9e343ce0ad04f76203e212bb5877eb13abd2e61fcaf" Jan 28 17:04:28 crc kubenswrapper[4877]: E0128 17:04:28.719660 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4075c3b9fe27021d713ab9e343ce0ad04f76203e212bb5877eb13abd2e61fcaf\": container with ID starting with 4075c3b9fe27021d713ab9e343ce0ad04f76203e212bb5877eb13abd2e61fcaf not found: ID does not exist" containerID="4075c3b9fe27021d713ab9e343ce0ad04f76203e212bb5877eb13abd2e61fcaf" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.719690 4877 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"4075c3b9fe27021d713ab9e343ce0ad04f76203e212bb5877eb13abd2e61fcaf"} err="failed to get container status \"4075c3b9fe27021d713ab9e343ce0ad04f76203e212bb5877eb13abd2e61fcaf\": rpc error: code = NotFound desc = could not find container \"4075c3b9fe27021d713ab9e343ce0ad04f76203e212bb5877eb13abd2e61fcaf\": container with ID starting with 4075c3b9fe27021d713ab9e343ce0ad04f76203e212bb5877eb13abd2e61fcaf not found: ID does not exist" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.719708 4877 scope.go:117] "RemoveContainer" containerID="bb77ca55177d07dda480721f36c0b4753971c59a418f2bc714c11113ac3d2088" Jan 28 17:04:28 crc kubenswrapper[4877]: E0128 17:04:28.719877 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb77ca55177d07dda480721f36c0b4753971c59a418f2bc714c11113ac3d2088\": container with ID starting with bb77ca55177d07dda480721f36c0b4753971c59a418f2bc714c11113ac3d2088 not found: ID does not exist" containerID="bb77ca55177d07dda480721f36c0b4753971c59a418f2bc714c11113ac3d2088" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.719915 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb77ca55177d07dda480721f36c0b4753971c59a418f2bc714c11113ac3d2088"} err="failed to get container status \"bb77ca55177d07dda480721f36c0b4753971c59a418f2bc714c11113ac3d2088\": rpc error: code = NotFound desc = could not find container \"bb77ca55177d07dda480721f36c0b4753971c59a418f2bc714c11113ac3d2088\": container with ID starting with bb77ca55177d07dda480721f36c0b4753971c59a418f2bc714c11113ac3d2088 not found: ID does not exist" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.719932 4877 scope.go:117] "RemoveContainer" containerID="2eb3ca4ed1203afb503d2152f95bef8dd7210bc0f37812a80f2b8e348d35caaf" Jan 28 17:04:28 crc kubenswrapper[4877]: E0128 17:04:28.720093 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2eb3ca4ed1203afb503d2152f95bef8dd7210bc0f37812a80f2b8e348d35caaf\": container with ID starting with 2eb3ca4ed1203afb503d2152f95bef8dd7210bc0f37812a80f2b8e348d35caaf not found: ID does not exist" containerID="2eb3ca4ed1203afb503d2152f95bef8dd7210bc0f37812a80f2b8e348d35caaf" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.720119 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2eb3ca4ed1203afb503d2152f95bef8dd7210bc0f37812a80f2b8e348d35caaf"} err="failed to get container status \"2eb3ca4ed1203afb503d2152f95bef8dd7210bc0f37812a80f2b8e348d35caaf\": rpc error: code = NotFound desc = could not find container \"2eb3ca4ed1203afb503d2152f95bef8dd7210bc0f37812a80f2b8e348d35caaf\": container with ID starting with 2eb3ca4ed1203afb503d2152f95bef8dd7210bc0f37812a80f2b8e348d35caaf not found: ID does not exist" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.753958 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.792847 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.813885 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:04:28 crc kubenswrapper[4877]: E0128 17:04:28.814440 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6df48e5-8631-4576-9cff-5795a458241e" 
containerName="ceilometer-notification-agent" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.814455 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6df48e5-8631-4576-9cff-5795a458241e" containerName="ceilometer-notification-agent" Jan 28 17:04:28 crc kubenswrapper[4877]: E0128 17:04:28.814488 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6df48e5-8631-4576-9cff-5795a458241e" containerName="ceilometer-central-agent" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.814494 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6df48e5-8631-4576-9cff-5795a458241e" containerName="ceilometer-central-agent" Jan 28 17:04:28 crc kubenswrapper[4877]: E0128 17:04:28.814519 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6df48e5-8631-4576-9cff-5795a458241e" containerName="sg-core" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.814525 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6df48e5-8631-4576-9cff-5795a458241e" containerName="sg-core" Jan 28 17:04:28 crc kubenswrapper[4877]: E0128 17:04:28.814546 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6df48e5-8631-4576-9cff-5795a458241e" containerName="proxy-httpd" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.814551 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6df48e5-8631-4576-9cff-5795a458241e" containerName="proxy-httpd" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.814767 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6df48e5-8631-4576-9cff-5795a458241e" containerName="proxy-httpd" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.814780 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6df48e5-8631-4576-9cff-5795a458241e" containerName="ceilometer-notification-agent" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.814789 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6df48e5-8631-4576-9cff-5795a458241e" containerName="ceilometer-central-agent" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.814804 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6df48e5-8631-4576-9cff-5795a458241e" containerName="sg-core" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.816877 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.819619 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.819707 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.819855 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.829077 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.869232 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.869337 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.869381 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-scripts\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.869406 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-config-data\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.869534 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.869595 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/25614695-7b60-4b60-8e43-4f20e6c3d44b-run-httpd\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.869624 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/25614695-7b60-4b60-8e43-4f20e6c3d44b-log-httpd\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.869657 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptp22\" (UniqueName: 
\"kubernetes.io/projected/25614695-7b60-4b60-8e43-4f20e6c3d44b-kube-api-access-ptp22\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.973923 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.974165 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.974328 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-scripts\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.974382 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-config-data\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.974682 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.974831 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/25614695-7b60-4b60-8e43-4f20e6c3d44b-run-httpd\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.974883 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/25614695-7b60-4b60-8e43-4f20e6c3d44b-log-httpd\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.974969 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptp22\" (UniqueName: \"kubernetes.io/projected/25614695-7b60-4b60-8e43-4f20e6c3d44b-kube-api-access-ptp22\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.981954 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/25614695-7b60-4b60-8e43-4f20e6c3d44b-run-httpd\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.993739 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/25614695-7b60-4b60-8e43-4f20e6c3d44b-log-httpd\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.998698 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:28 crc kubenswrapper[4877]: I0128 17:04:28.999850 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-scripts\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:29 crc kubenswrapper[4877]: I0128 17:04:29.001142 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:29 crc kubenswrapper[4877]: I0128 17:04:29.001761 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:29 crc kubenswrapper[4877]: I0128 17:04:29.002778 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-config-data\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:29 crc kubenswrapper[4877]: I0128 17:04:29.009403 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptp22\" (UniqueName: \"kubernetes.io/projected/25614695-7b60-4b60-8e43-4f20e6c3d44b-kube-api-access-ptp22\") pod \"ceilometer-0\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " pod="openstack/ceilometer-0" Jan 28 17:04:29 crc kubenswrapper[4877]: I0128 17:04:29.142628 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:04:29 crc kubenswrapper[4877]: I0128 17:04:29.346369 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6df48e5-8631-4576-9cff-5795a458241e" path="/var/lib/kubelet/pods/f6df48e5-8631-4576-9cff-5795a458241e/volumes" Jan 28 17:04:29 crc kubenswrapper[4877]: I0128 17:04:29.402953 4877 generic.go:334] "Generic (PLEG): container finished" podID="f0f7ed22-d3da-47ce-b61c-53a0b7a878e1" containerID="0b4579ff3b99fcbed0d1f86794cee9170c201767c7c903ee64e7aabc65f024ce" exitCode=0 Jan 28 17:04:29 crc kubenswrapper[4877]: I0128 17:04:29.403044 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" event={"ID":"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1","Type":"ContainerDied","Data":"0b4579ff3b99fcbed0d1f86794cee9170c201767c7c903ee64e7aabc65f024ce"} Jan 28 17:04:29 crc kubenswrapper[4877]: W0128 17:04:29.736288 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod25614695_7b60_4b60_8e43_4f20e6c3d44b.slice/crio-bdeedd694d6afad552f0192a9a03339dcdb7d91355ed49471caacb38203af560 WatchSource:0}: Error finding container bdeedd694d6afad552f0192a9a03339dcdb7d91355ed49471caacb38203af560: Status 404 returned error can't find the container with id bdeedd694d6afad552f0192a9a03339dcdb7d91355ed49471caacb38203af560 Jan 28 17:04:29 crc kubenswrapper[4877]: I0128 17:04:29.739512 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:04:29 crc kubenswrapper[4877]: I0128 17:04:29.752046 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:29 crc kubenswrapper[4877]: I0128 17:04:29.874516 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:29 crc kubenswrapper[4877]: I0128 17:04:29.874744 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5886ef03-0905-4c4c-89cf-7de7804f8e5a" containerName="nova-api-log" containerID="cri-o://2203cc83e2964ab1aa11525d5c20cae3b5e49a9c84e8069d807b07a1456f5448" gracePeriod=30 Jan 28 17:04:29 crc kubenswrapper[4877]: I0128 17:04:29.875207 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5886ef03-0905-4c4c-89cf-7de7804f8e5a" containerName="nova-api-api" containerID="cri-o://c3b055a7054faeb73cdfbfab890a5b3cf062179dcd2635991b2ffa97c535f6b7" gracePeriod=30 Jan 28 17:04:30 crc kubenswrapper[4877]: I0128 17:04:30.435050 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" event={"ID":"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1","Type":"ContainerStarted","Data":"e11d2af44f807b9e51bae7e84ac049753035ecf68bd26cd8546332748c71bc3d"} Jan 28 17:04:30 crc kubenswrapper[4877]: I0128 17:04:30.435612 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:30 crc kubenswrapper[4877]: I0128 17:04:30.437720 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"25614695-7b60-4b60-8e43-4f20e6c3d44b","Type":"ContainerStarted","Data":"bdeedd694d6afad552f0192a9a03339dcdb7d91355ed49471caacb38203af560"} Jan 28 17:04:30 crc kubenswrapper[4877]: I0128 17:04:30.449073 4877 generic.go:334] "Generic (PLEG): container finished" podID="5886ef03-0905-4c4c-89cf-7de7804f8e5a" 
containerID="2203cc83e2964ab1aa11525d5c20cae3b5e49a9c84e8069d807b07a1456f5448" exitCode=143 Jan 28 17:04:30 crc kubenswrapper[4877]: I0128 17:04:30.449120 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5886ef03-0905-4c4c-89cf-7de7804f8e5a","Type":"ContainerDied","Data":"2203cc83e2964ab1aa11525d5c20cae3b5e49a9c84e8069d807b07a1456f5448"} Jan 28 17:04:30 crc kubenswrapper[4877]: I0128 17:04:30.461774 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" podStartSLOduration=4.461748805 podStartE2EDuration="4.461748805s" podCreationTimestamp="2026-01-28 17:04:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:04:30.452753223 +0000 UTC m=+1774.011080111" watchObservedRunningTime="2026-01-28 17:04:30.461748805 +0000 UTC m=+1774.020075693" Jan 28 17:04:30 crc kubenswrapper[4877]: I0128 17:04:30.508870 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:04:31 crc kubenswrapper[4877]: I0128 17:04:31.500941 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"25614695-7b60-4b60-8e43-4f20e6c3d44b","Type":"ContainerStarted","Data":"99005acbed1e30f14f0cacc77abf700ff849cbf321aee7573fbc24fc3acbb5ed"} Jan 28 17:04:31 crc kubenswrapper[4877]: I0128 17:04:31.501622 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"25614695-7b60-4b60-8e43-4f20e6c3d44b","Type":"ContainerStarted","Data":"1d5303858c3ddd0d1de5047fce65b0ac11083b9b474caf67a91f29bccce34898"} Jan 28 17:04:31 crc kubenswrapper[4877]: I0128 17:04:31.665225 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 28 17:04:32 crc kubenswrapper[4877]: I0128 17:04:32.331038 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:04:32 crc kubenswrapper[4877]: E0128 17:04:32.331842 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:04:32 crc kubenswrapper[4877]: I0128 17:04:32.515088 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"25614695-7b60-4b60-8e43-4f20e6c3d44b","Type":"ContainerStarted","Data":"7c9e5800b637d8fa4d641d62ebd63adb48880f3fd7922786fd300d893664d008"} Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.528891 4877 generic.go:334] "Generic (PLEG): container finished" podID="5886ef03-0905-4c4c-89cf-7de7804f8e5a" containerID="c3b055a7054faeb73cdfbfab890a5b3cf062179dcd2635991b2ffa97c535f6b7" exitCode=0 Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.528982 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5886ef03-0905-4c4c-89cf-7de7804f8e5a","Type":"ContainerDied","Data":"c3b055a7054faeb73cdfbfab890a5b3cf062179dcd2635991b2ffa97c535f6b7"} Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.529214 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"5886ef03-0905-4c4c-89cf-7de7804f8e5a","Type":"ContainerDied","Data":"5d94523ead590c984da7464e5f96c0ee48663c1582ff2220ca48e7eba9bfa88b"} Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.529229 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d94523ead590c984da7464e5f96c0ee48663c1582ff2220ca48e7eba9bfa88b" Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.670713 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.713669 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5886ef03-0905-4c4c-89cf-7de7804f8e5a-logs\") pod \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\" (UID: \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\") " Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.713954 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5886ef03-0905-4c4c-89cf-7de7804f8e5a-combined-ca-bundle\") pod \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\" (UID: \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\") " Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.714019 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5886ef03-0905-4c4c-89cf-7de7804f8e5a-config-data\") pod \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\" (UID: \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\") " Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.714066 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbqfq\" (UniqueName: \"kubernetes.io/projected/5886ef03-0905-4c4c-89cf-7de7804f8e5a-kube-api-access-bbqfq\") pod \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\" (UID: \"5886ef03-0905-4c4c-89cf-7de7804f8e5a\") " Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.714346 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5886ef03-0905-4c4c-89cf-7de7804f8e5a-logs" (OuterVolumeSpecName: "logs") pod "5886ef03-0905-4c4c-89cf-7de7804f8e5a" (UID: "5886ef03-0905-4c4c-89cf-7de7804f8e5a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.714808 4877 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5886ef03-0905-4c4c-89cf-7de7804f8e5a-logs\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.732731 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5886ef03-0905-4c4c-89cf-7de7804f8e5a-kube-api-access-bbqfq" (OuterVolumeSpecName: "kube-api-access-bbqfq") pod "5886ef03-0905-4c4c-89cf-7de7804f8e5a" (UID: "5886ef03-0905-4c4c-89cf-7de7804f8e5a"). InnerVolumeSpecName "kube-api-access-bbqfq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.764686 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5886ef03-0905-4c4c-89cf-7de7804f8e5a-config-data" (OuterVolumeSpecName: "config-data") pod "5886ef03-0905-4c4c-89cf-7de7804f8e5a" (UID: "5886ef03-0905-4c4c-89cf-7de7804f8e5a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.775361 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5886ef03-0905-4c4c-89cf-7de7804f8e5a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5886ef03-0905-4c4c-89cf-7de7804f8e5a" (UID: "5886ef03-0905-4c4c-89cf-7de7804f8e5a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.817519 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5886ef03-0905-4c4c-89cf-7de7804f8e5a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.817736 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5886ef03-0905-4c4c-89cf-7de7804f8e5a-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:33 crc kubenswrapper[4877]: I0128 17:04:33.817805 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbqfq\" (UniqueName: \"kubernetes.io/projected/5886ef03-0905-4c4c-89cf-7de7804f8e5a-kube-api-access-bbqfq\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.543900 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.544671 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"25614695-7b60-4b60-8e43-4f20e6c3d44b","Type":"ContainerStarted","Data":"e7409ae6245e6c6f0ed03ef7d56b48a0ff5fcfd61936ec653cd529d829cf8fda"} Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.544719 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerName="ceilometer-central-agent" containerID="cri-o://99005acbed1e30f14f0cacc77abf700ff849cbf321aee7573fbc24fc3acbb5ed" gracePeriod=30 Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.544801 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerName="sg-core" containerID="cri-o://7c9e5800b637d8fa4d641d62ebd63adb48880f3fd7922786fd300d893664d008" gracePeriod=30 Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.544854 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.544847 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerName="ceilometer-notification-agent" containerID="cri-o://1d5303858c3ddd0d1de5047fce65b0ac11083b9b474caf67a91f29bccce34898" gracePeriod=30 Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.544847 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerName="proxy-httpd" containerID="cri-o://e7409ae6245e6c6f0ed03ef7d56b48a0ff5fcfd61936ec653cd529d829cf8fda" gracePeriod=30 Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.596105 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.404563095 
podStartE2EDuration="6.59608101s" podCreationTimestamp="2026-01-28 17:04:28 +0000 UTC" firstStartedPulling="2026-01-28 17:04:29.739078228 +0000 UTC m=+1773.297405116" lastFinishedPulling="2026-01-28 17:04:33.930596143 +0000 UTC m=+1777.488923031" observedRunningTime="2026-01-28 17:04:34.578836476 +0000 UTC m=+1778.137163364" watchObservedRunningTime="2026-01-28 17:04:34.59608101 +0000 UTC m=+1778.154407918" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.615734 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.631968 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.650529 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:34 crc kubenswrapper[4877]: E0128 17:04:34.651164 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5886ef03-0905-4c4c-89cf-7de7804f8e5a" containerName="nova-api-log" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.651187 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="5886ef03-0905-4c4c-89cf-7de7804f8e5a" containerName="nova-api-log" Jan 28 17:04:34 crc kubenswrapper[4877]: E0128 17:04:34.651218 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5886ef03-0905-4c4c-89cf-7de7804f8e5a" containerName="nova-api-api" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.651224 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="5886ef03-0905-4c4c-89cf-7de7804f8e5a" containerName="nova-api-api" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.651440 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="5886ef03-0905-4c4c-89cf-7de7804f8e5a" containerName="nova-api-api" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.651457 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="5886ef03-0905-4c4c-89cf-7de7804f8e5a" containerName="nova-api-log" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.652740 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.660273 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.660559 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.660703 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.716146 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.741439 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-public-tls-certs\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.741542 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-logs\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.741649 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-config-data\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.741685 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.741783 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxh8h\" (UniqueName: \"kubernetes.io/projected/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-kube-api-access-vxh8h\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.741871 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.751904 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.783996 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.845309 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-config-data\") pod \"nova-api-0\" (UID: 
\"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.845626 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.845847 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxh8h\" (UniqueName: \"kubernetes.io/projected/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-kube-api-access-vxh8h\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.846498 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.846746 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-public-tls-certs\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.846888 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-logs\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.853327 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-logs\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.861269 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.863661 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-public-tls-certs\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.866770 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-config-data\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.867592 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 
17:04:34 crc kubenswrapper[4877]: I0128 17:04:34.876579 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxh8h\" (UniqueName: \"kubernetes.io/projected/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-kube-api-access-vxh8h\") pod \"nova-api-0\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " pod="openstack/nova-api-0" Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.136554 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.355035 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5886ef03-0905-4c4c-89cf-7de7804f8e5a" path="/var/lib/kubelet/pods/5886ef03-0905-4c4c-89cf-7de7804f8e5a/volumes" Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.570271 4877 generic.go:334] "Generic (PLEG): container finished" podID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerID="e7409ae6245e6c6f0ed03ef7d56b48a0ff5fcfd61936ec653cd529d829cf8fda" exitCode=0 Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.570311 4877 generic.go:334] "Generic (PLEG): container finished" podID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerID="7c9e5800b637d8fa4d641d62ebd63adb48880f3fd7922786fd300d893664d008" exitCode=2 Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.570320 4877 generic.go:334] "Generic (PLEG): container finished" podID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerID="1d5303858c3ddd0d1de5047fce65b0ac11083b9b474caf67a91f29bccce34898" exitCode=0 Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.570339 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"25614695-7b60-4b60-8e43-4f20e6c3d44b","Type":"ContainerDied","Data":"e7409ae6245e6c6f0ed03ef7d56b48a0ff5fcfd61936ec653cd529d829cf8fda"} Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.570383 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"25614695-7b60-4b60-8e43-4f20e6c3d44b","Type":"ContainerDied","Data":"7c9e5800b637d8fa4d641d62ebd63adb48880f3fd7922786fd300d893664d008"} Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.570397 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"25614695-7b60-4b60-8e43-4f20e6c3d44b","Type":"ContainerDied","Data":"1d5303858c3ddd0d1de5047fce65b0ac11083b9b474caf67a91f29bccce34898"} Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.592696 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 28 17:04:35 crc kubenswrapper[4877]: E0128 17:04:35.691249 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod25614695_7b60_4b60_8e43_4f20e6c3d44b.slice/crio-conmon-1d5303858c3ddd0d1de5047fce65b0ac11083b9b474caf67a91f29bccce34898.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod25614695_7b60_4b60_8e43_4f20e6c3d44b.slice/crio-1d5303858c3ddd0d1de5047fce65b0ac11083b9b474caf67a91f29bccce34898.scope\": RecentStats: unable to find data in memory cache]" Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.772631 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.848180 4877 kubelet.go:2421] "SyncLoop ADD" source="api" 
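[editor's sketch] The "SyncLoop ADD/UPDATE/DELETE source=api" entries throughout this log are the kubelet reacting to its watch on the API server; the PLEG events are the runtime-side counterpart. A client-side sketch of the same API stream for this namespace, assuming in-cluster credentials:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	w, err := cs.CoreV1().Pods("openstack").Watch(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		pod, ok := ev.Object.(*corev1.Pod)
		if !ok {
			continue
		}
		// ADDED/MODIFIED/DELETED here correspond to the kubelet's
		// "SyncLoop ADD/UPDATE/DELETE" entries for the same pods.
		fmt.Printf("%s %s/%s phase=%s\n", ev.Type, pod.Namespace, pod.Name, pod.Status.Phase)
	}
}
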
pods=["openstack/nova-cell1-cell-mapping-8jrbg"] Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.852000 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-8jrbg" Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.859932 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.860398 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.875808 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-8jrbg"] Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.980217 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-8jrbg\" (UID: \"b8e73c54-63fa-450f-8d23-9d566575569e\") " pod="openstack/nova-cell1-cell-mapping-8jrbg" Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.980330 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-config-data\") pod \"nova-cell1-cell-mapping-8jrbg\" (UID: \"b8e73c54-63fa-450f-8d23-9d566575569e\") " pod="openstack/nova-cell1-cell-mapping-8jrbg" Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.980405 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24l6n\" (UniqueName: \"kubernetes.io/projected/b8e73c54-63fa-450f-8d23-9d566575569e-kube-api-access-24l6n\") pod \"nova-cell1-cell-mapping-8jrbg\" (UID: \"b8e73c54-63fa-450f-8d23-9d566575569e\") " pod="openstack/nova-cell1-cell-mapping-8jrbg" Jan 28 17:04:35 crc kubenswrapper[4877]: I0128 17:04:35.980617 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-scripts\") pod \"nova-cell1-cell-mapping-8jrbg\" (UID: \"b8e73c54-63fa-450f-8d23-9d566575569e\") " pod="openstack/nova-cell1-cell-mapping-8jrbg" Jan 28 17:04:36 crc kubenswrapper[4877]: I0128 17:04:36.083201 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-scripts\") pod \"nova-cell1-cell-mapping-8jrbg\" (UID: \"b8e73c54-63fa-450f-8d23-9d566575569e\") " pod="openstack/nova-cell1-cell-mapping-8jrbg" Jan 28 17:04:36 crc kubenswrapper[4877]: I0128 17:04:36.083821 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-8jrbg\" (UID: \"b8e73c54-63fa-450f-8d23-9d566575569e\") " pod="openstack/nova-cell1-cell-mapping-8jrbg" Jan 28 17:04:36 crc kubenswrapper[4877]: I0128 17:04:36.083925 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-config-data\") pod \"nova-cell1-cell-mapping-8jrbg\" (UID: \"b8e73c54-63fa-450f-8d23-9d566575569e\") " pod="openstack/nova-cell1-cell-mapping-8jrbg" Jan 28 17:04:36 crc 
kubenswrapper[4877]: I0128 17:04:36.084029 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24l6n\" (UniqueName: \"kubernetes.io/projected/b8e73c54-63fa-450f-8d23-9d566575569e-kube-api-access-24l6n\") pod \"nova-cell1-cell-mapping-8jrbg\" (UID: \"b8e73c54-63fa-450f-8d23-9d566575569e\") " pod="openstack/nova-cell1-cell-mapping-8jrbg" Jan 28 17:04:36 crc kubenswrapper[4877]: I0128 17:04:36.091175 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-scripts\") pod \"nova-cell1-cell-mapping-8jrbg\" (UID: \"b8e73c54-63fa-450f-8d23-9d566575569e\") " pod="openstack/nova-cell1-cell-mapping-8jrbg" Jan 28 17:04:36 crc kubenswrapper[4877]: I0128 17:04:36.091176 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-config-data\") pod \"nova-cell1-cell-mapping-8jrbg\" (UID: \"b8e73c54-63fa-450f-8d23-9d566575569e\") " pod="openstack/nova-cell1-cell-mapping-8jrbg" Jan 28 17:04:36 crc kubenswrapper[4877]: I0128 17:04:36.097426 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-8jrbg\" (UID: \"b8e73c54-63fa-450f-8d23-9d566575569e\") " pod="openstack/nova-cell1-cell-mapping-8jrbg" Jan 28 17:04:36 crc kubenswrapper[4877]: I0128 17:04:36.102448 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24l6n\" (UniqueName: \"kubernetes.io/projected/b8e73c54-63fa-450f-8d23-9d566575569e-kube-api-access-24l6n\") pod \"nova-cell1-cell-mapping-8jrbg\" (UID: \"b8e73c54-63fa-450f-8d23-9d566575569e\") " pod="openstack/nova-cell1-cell-mapping-8jrbg" Jan 28 17:04:36 crc kubenswrapper[4877]: I0128 17:04:36.338654 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-8jrbg" Jan 28 17:04:36 crc kubenswrapper[4877]: I0128 17:04:36.608581 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf","Type":"ContainerStarted","Data":"eb76f4a927a751f3afe3a16afa16391e4021875fda3397490614a9ef35580094"} Jan 28 17:04:36 crc kubenswrapper[4877]: I0128 17:04:36.608906 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf","Type":"ContainerStarted","Data":"1e99f7ed46e6835b5628dd42a72b0432a03bb6151c5fce13922ae495a79b4071"} Jan 28 17:04:36 crc kubenswrapper[4877]: I0128 17:04:36.918847 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-8jrbg"] Jan 28 17:04:36 crc kubenswrapper[4877]: W0128 17:04:36.919044 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb8e73c54_63fa_450f_8d23_9d566575569e.slice/crio-97367e1dbc1f8ead218c8241e46e2e0dd112e29e7f23822859227c0f27da90af WatchSource:0}: Error finding container 97367e1dbc1f8ead218c8241e46e2e0dd112e29e7f23822859227c0f27da90af: Status 404 returned error can't find the container with id 97367e1dbc1f8ead218c8241e46e2e0dd112e29e7f23822859227c0f27da90af Jan 28 17:04:37 crc kubenswrapper[4877]: I0128 17:04:37.121701 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:04:37 crc kubenswrapper[4877]: I0128 17:04:37.214713 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-wgdw5"] Jan 28 17:04:37 crc kubenswrapper[4877]: I0128 17:04:37.214961 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5" podUID="d61dd139-ed88-4a56-974d-3860f196e55d" containerName="dnsmasq-dns" containerID="cri-o://193cec595451e9a6635aedd8fa161bb3ffd7422d6d6a47a5fa3a9933c06bdffc" gracePeriod=10 Jan 28 17:04:37 crc kubenswrapper[4877]: I0128 17:04:37.628616 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-8jrbg" event={"ID":"b8e73c54-63fa-450f-8d23-9d566575569e","Type":"ContainerStarted","Data":"01a871d9b7cb7c30eb2fc7c233b32c042067939b85b4157c38ba6228f3d56c86"} Jan 28 17:04:37 crc kubenswrapper[4877]: I0128 17:04:37.628662 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-8jrbg" event={"ID":"b8e73c54-63fa-450f-8d23-9d566575569e","Type":"ContainerStarted","Data":"97367e1dbc1f8ead218c8241e46e2e0dd112e29e7f23822859227c0f27da90af"} Jan 28 17:04:37 crc kubenswrapper[4877]: I0128 17:04:37.642706 4877 generic.go:334] "Generic (PLEG): container finished" podID="d61dd139-ed88-4a56-974d-3860f196e55d" containerID="193cec595451e9a6635aedd8fa161bb3ffd7422d6d6a47a5fa3a9933c06bdffc" exitCode=0 Jan 28 17:04:37 crc kubenswrapper[4877]: I0128 17:04:37.642823 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5" event={"ID":"d61dd139-ed88-4a56-974d-3860f196e55d","Type":"ContainerDied","Data":"193cec595451e9a6635aedd8fa161bb3ffd7422d6d6a47a5fa3a9933c06bdffc"} Jan 28 17:04:37 crc kubenswrapper[4877]: I0128 17:04:37.651534 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf","Type":"ContainerStarted","Data":"abf2484a811c1a751f1970991c49d32c5c3b0510b6947260bcc95e576353ae91"} Jan 28 17:04:37 crc kubenswrapper[4877]: I0128 17:04:37.664696 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-8jrbg" podStartSLOduration=2.664673211 podStartE2EDuration="2.664673211s" podCreationTimestamp="2026-01-28 17:04:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:04:37.646318226 +0000 UTC m=+1781.204645114" watchObservedRunningTime="2026-01-28 17:04:37.664673211 +0000 UTC m=+1781.223000099" Jan 28 17:04:37 crc kubenswrapper[4877]: I0128 17:04:37.690146 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.690123386 podStartE2EDuration="3.690123386s" podCreationTimestamp="2026-01-28 17:04:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:04:37.671659079 +0000 UTC m=+1781.229985967" watchObservedRunningTime="2026-01-28 17:04:37.690123386 +0000 UTC m=+1781.248450274" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.085021 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.152789 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-config\") pod \"d61dd139-ed88-4a56-974d-3860f196e55d\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.152847 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wk7d9\" (UniqueName: \"kubernetes.io/projected/d61dd139-ed88-4a56-974d-3860f196e55d-kube-api-access-wk7d9\") pod \"d61dd139-ed88-4a56-974d-3860f196e55d\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.152898 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-ovsdbserver-nb\") pod \"d61dd139-ed88-4a56-974d-3860f196e55d\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.153126 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-ovsdbserver-sb\") pod \"d61dd139-ed88-4a56-974d-3860f196e55d\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.153715 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-dns-swift-storage-0\") pod \"d61dd139-ed88-4a56-974d-3860f196e55d\" (UID: \"d61dd139-ed88-4a56-974d-3860f196e55d\") " Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.153763 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-dns-svc\") pod \"d61dd139-ed88-4a56-974d-3860f196e55d\" (UID: 
\"d61dd139-ed88-4a56-974d-3860f196e55d\") " Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.170529 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d61dd139-ed88-4a56-974d-3860f196e55d-kube-api-access-wk7d9" (OuterVolumeSpecName: "kube-api-access-wk7d9") pod "d61dd139-ed88-4a56-974d-3860f196e55d" (UID: "d61dd139-ed88-4a56-974d-3860f196e55d"). InnerVolumeSpecName "kube-api-access-wk7d9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.259157 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wk7d9\" (UniqueName: \"kubernetes.io/projected/d61dd139-ed88-4a56-974d-3860f196e55d-kube-api-access-wk7d9\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.336523 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d61dd139-ed88-4a56-974d-3860f196e55d" (UID: "d61dd139-ed88-4a56-974d-3860f196e55d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.344920 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d61dd139-ed88-4a56-974d-3860f196e55d" (UID: "d61dd139-ed88-4a56-974d-3860f196e55d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.359440 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d61dd139-ed88-4a56-974d-3860f196e55d" (UID: "d61dd139-ed88-4a56-974d-3860f196e55d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.361422 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.361456 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.361466 4877 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.372869 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-config" (OuterVolumeSpecName: "config") pod "d61dd139-ed88-4a56-974d-3860f196e55d" (UID: "d61dd139-ed88-4a56-974d-3860f196e55d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.389737 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d61dd139-ed88-4a56-974d-3860f196e55d" (UID: "d61dd139-ed88-4a56-974d-3860f196e55d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.463792 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.463830 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d61dd139-ed88-4a56-974d-3860f196e55d-config\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.666969 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5" event={"ID":"d61dd139-ed88-4a56-974d-3860f196e55d","Type":"ContainerDied","Data":"3164420824da21792c83b06a89da2e238292a4858721990da8f60e0db79537bd"} Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.667301 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-wgdw5" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.667996 4877 scope.go:117] "RemoveContainer" containerID="193cec595451e9a6635aedd8fa161bb3ffd7422d6d6a47a5fa3a9933c06bdffc" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.714701 4877 scope.go:117] "RemoveContainer" containerID="b6985e3177c8cadcf98b329537029623a7da0f3b1095063d99babfc3c43ea51d" Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.718529 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-wgdw5"] Jan 28 17:04:38 crc kubenswrapper[4877]: I0128 17:04:38.730742 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-wgdw5"] Jan 28 17:04:39 crc kubenswrapper[4877]: I0128 17:04:39.346713 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d61dd139-ed88-4a56-974d-3860f196e55d" path="/var/lib/kubelet/pods/d61dd139-ed88-4a56-974d-3860f196e55d/volumes" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.265601 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.327786 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-combined-ca-bundle\") pod \"25614695-7b60-4b60-8e43-4f20e6c3d44b\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.327986 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ptp22\" (UniqueName: \"kubernetes.io/projected/25614695-7b60-4b60-8e43-4f20e6c3d44b-kube-api-access-ptp22\") pod \"25614695-7b60-4b60-8e43-4f20e6c3d44b\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.328103 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-scripts\") pod \"25614695-7b60-4b60-8e43-4f20e6c3d44b\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.328239 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-sg-core-conf-yaml\") pod \"25614695-7b60-4b60-8e43-4f20e6c3d44b\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.328343 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-ceilometer-tls-certs\") pod \"25614695-7b60-4b60-8e43-4f20e6c3d44b\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.328395 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/25614695-7b60-4b60-8e43-4f20e6c3d44b-log-httpd\") pod \"25614695-7b60-4b60-8e43-4f20e6c3d44b\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.328558 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-config-data\") pod \"25614695-7b60-4b60-8e43-4f20e6c3d44b\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.328615 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/25614695-7b60-4b60-8e43-4f20e6c3d44b-run-httpd\") pod \"25614695-7b60-4b60-8e43-4f20e6c3d44b\" (UID: \"25614695-7b60-4b60-8e43-4f20e6c3d44b\") " Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.328936 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/25614695-7b60-4b60-8e43-4f20e6c3d44b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "25614695-7b60-4b60-8e43-4f20e6c3d44b" (UID: "25614695-7b60-4b60-8e43-4f20e6c3d44b"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.329149 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/25614695-7b60-4b60-8e43-4f20e6c3d44b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "25614695-7b60-4b60-8e43-4f20e6c3d44b" (UID: "25614695-7b60-4b60-8e43-4f20e6c3d44b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.329786 4877 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/25614695-7b60-4b60-8e43-4f20e6c3d44b-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.329818 4877 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/25614695-7b60-4b60-8e43-4f20e6c3d44b-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.333844 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-scripts" (OuterVolumeSpecName: "scripts") pod "25614695-7b60-4b60-8e43-4f20e6c3d44b" (UID: "25614695-7b60-4b60-8e43-4f20e6c3d44b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.333873 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25614695-7b60-4b60-8e43-4f20e6c3d44b-kube-api-access-ptp22" (OuterVolumeSpecName: "kube-api-access-ptp22") pod "25614695-7b60-4b60-8e43-4f20e6c3d44b" (UID: "25614695-7b60-4b60-8e43-4f20e6c3d44b"). InnerVolumeSpecName "kube-api-access-ptp22". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.398600 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "25614695-7b60-4b60-8e43-4f20e6c3d44b" (UID: "25614695-7b60-4b60-8e43-4f20e6c3d44b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.412409 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "25614695-7b60-4b60-8e43-4f20e6c3d44b" (UID: "25614695-7b60-4b60-8e43-4f20e6c3d44b"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.432698 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.432753 4877 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.432763 4877 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.432773 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ptp22\" (UniqueName: \"kubernetes.io/projected/25614695-7b60-4b60-8e43-4f20e6c3d44b-kube-api-access-ptp22\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.461796 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "25614695-7b60-4b60-8e43-4f20e6c3d44b" (UID: "25614695-7b60-4b60-8e43-4f20e6c3d44b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.470805 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-config-data" (OuterVolumeSpecName: "config-data") pod "25614695-7b60-4b60-8e43-4f20e6c3d44b" (UID: "25614695-7b60-4b60-8e43-4f20e6c3d44b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.534496 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.534542 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25614695-7b60-4b60-8e43-4f20e6c3d44b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.700202 4877 generic.go:334] "Generic (PLEG): container finished" podID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerID="99005acbed1e30f14f0cacc77abf700ff849cbf321aee7573fbc24fc3acbb5ed" exitCode=0 Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.700253 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"25614695-7b60-4b60-8e43-4f20e6c3d44b","Type":"ContainerDied","Data":"99005acbed1e30f14f0cacc77abf700ff849cbf321aee7573fbc24fc3acbb5ed"} Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.700290 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"25614695-7b60-4b60-8e43-4f20e6c3d44b","Type":"ContainerDied","Data":"bdeedd694d6afad552f0192a9a03339dcdb7d91355ed49471caacb38203af560"} Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.700314 4877 scope.go:117] "RemoveContainer" containerID="e7409ae6245e6c6f0ed03ef7d56b48a0ff5fcfd61936ec653cd529d829cf8fda" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.700436 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.734644 4877 scope.go:117] "RemoveContainer" containerID="7c9e5800b637d8fa4d641d62ebd63adb48880f3fd7922786fd300d893664d008" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.784533 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.792935 4877 scope.go:117] "RemoveContainer" containerID="1d5303858c3ddd0d1de5047fce65b0ac11083b9b474caf67a91f29bccce34898" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.818311 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.845332 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:04:40 crc kubenswrapper[4877]: E0128 17:04:40.845908 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerName="ceilometer-notification-agent" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.845930 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerName="ceilometer-notification-agent" Jan 28 17:04:40 crc kubenswrapper[4877]: E0128 17:04:40.845954 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d61dd139-ed88-4a56-974d-3860f196e55d" containerName="dnsmasq-dns" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.845960 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d61dd139-ed88-4a56-974d-3860f196e55d" containerName="dnsmasq-dns" Jan 28 17:04:40 crc kubenswrapper[4877]: E0128 17:04:40.845977 4877 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="d61dd139-ed88-4a56-974d-3860f196e55d" containerName="init" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.845985 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d61dd139-ed88-4a56-974d-3860f196e55d" containerName="init" Jan 28 17:04:40 crc kubenswrapper[4877]: E0128 17:04:40.845995 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerName="sg-core" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.846001 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerName="sg-core" Jan 28 17:04:40 crc kubenswrapper[4877]: E0128 17:04:40.846016 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerName="proxy-httpd" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.846022 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerName="proxy-httpd" Jan 28 17:04:40 crc kubenswrapper[4877]: E0128 17:04:40.846051 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerName="ceilometer-central-agent" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.846057 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerName="ceilometer-central-agent" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.846265 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="d61dd139-ed88-4a56-974d-3860f196e55d" containerName="dnsmasq-dns" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.846292 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerName="ceilometer-notification-agent" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.846302 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerName="sg-core" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.846311 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerName="ceilometer-central-agent" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.846325 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" containerName="proxy-httpd" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.848457 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.857149 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.862515 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.862571 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.862624 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.885847 4877 scope.go:117] "RemoveContainer" containerID="99005acbed1e30f14f0cacc77abf700ff849cbf321aee7573fbc24fc3acbb5ed" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.915685 4877 scope.go:117] "RemoveContainer" containerID="e7409ae6245e6c6f0ed03ef7d56b48a0ff5fcfd61936ec653cd529d829cf8fda" Jan 28 17:04:40 crc kubenswrapper[4877]: E0128 17:04:40.916256 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7409ae6245e6c6f0ed03ef7d56b48a0ff5fcfd61936ec653cd529d829cf8fda\": container with ID starting with e7409ae6245e6c6f0ed03ef7d56b48a0ff5fcfd61936ec653cd529d829cf8fda not found: ID does not exist" containerID="e7409ae6245e6c6f0ed03ef7d56b48a0ff5fcfd61936ec653cd529d829cf8fda" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.916346 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7409ae6245e6c6f0ed03ef7d56b48a0ff5fcfd61936ec653cd529d829cf8fda"} err="failed to get container status \"e7409ae6245e6c6f0ed03ef7d56b48a0ff5fcfd61936ec653cd529d829cf8fda\": rpc error: code = NotFound desc = could not find container \"e7409ae6245e6c6f0ed03ef7d56b48a0ff5fcfd61936ec653cd529d829cf8fda\": container with ID starting with e7409ae6245e6c6f0ed03ef7d56b48a0ff5fcfd61936ec653cd529d829cf8fda not found: ID does not exist" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.916391 4877 scope.go:117] "RemoveContainer" containerID="7c9e5800b637d8fa4d641d62ebd63adb48880f3fd7922786fd300d893664d008" Jan 28 17:04:40 crc kubenswrapper[4877]: E0128 17:04:40.916926 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c9e5800b637d8fa4d641d62ebd63adb48880f3fd7922786fd300d893664d008\": container with ID starting with 7c9e5800b637d8fa4d641d62ebd63adb48880f3fd7922786fd300d893664d008 not found: ID does not exist" containerID="7c9e5800b637d8fa4d641d62ebd63adb48880f3fd7922786fd300d893664d008" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.916986 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c9e5800b637d8fa4d641d62ebd63adb48880f3fd7922786fd300d893664d008"} err="failed to get container status \"7c9e5800b637d8fa4d641d62ebd63adb48880f3fd7922786fd300d893664d008\": rpc error: code = NotFound desc = could not find container \"7c9e5800b637d8fa4d641d62ebd63adb48880f3fd7922786fd300d893664d008\": container with ID starting with 7c9e5800b637d8fa4d641d62ebd63adb48880f3fd7922786fd300d893664d008 not found: ID does not exist" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.917018 4877 scope.go:117] "RemoveContainer" containerID="1d5303858c3ddd0d1de5047fce65b0ac11083b9b474caf67a91f29bccce34898" Jan 28 17:04:40 
crc kubenswrapper[4877]: E0128 17:04:40.917334 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d5303858c3ddd0d1de5047fce65b0ac11083b9b474caf67a91f29bccce34898\": container with ID starting with 1d5303858c3ddd0d1de5047fce65b0ac11083b9b474caf67a91f29bccce34898 not found: ID does not exist" containerID="1d5303858c3ddd0d1de5047fce65b0ac11083b9b474caf67a91f29bccce34898" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.917370 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d5303858c3ddd0d1de5047fce65b0ac11083b9b474caf67a91f29bccce34898"} err="failed to get container status \"1d5303858c3ddd0d1de5047fce65b0ac11083b9b474caf67a91f29bccce34898\": rpc error: code = NotFound desc = could not find container \"1d5303858c3ddd0d1de5047fce65b0ac11083b9b474caf67a91f29bccce34898\": container with ID starting with 1d5303858c3ddd0d1de5047fce65b0ac11083b9b474caf67a91f29bccce34898 not found: ID does not exist" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.917390 4877 scope.go:117] "RemoveContainer" containerID="99005acbed1e30f14f0cacc77abf700ff849cbf321aee7573fbc24fc3acbb5ed" Jan 28 17:04:40 crc kubenswrapper[4877]: E0128 17:04:40.917727 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99005acbed1e30f14f0cacc77abf700ff849cbf321aee7573fbc24fc3acbb5ed\": container with ID starting with 99005acbed1e30f14f0cacc77abf700ff849cbf321aee7573fbc24fc3acbb5ed not found: ID does not exist" containerID="99005acbed1e30f14f0cacc77abf700ff849cbf321aee7573fbc24fc3acbb5ed" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.917755 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99005acbed1e30f14f0cacc77abf700ff849cbf321aee7573fbc24fc3acbb5ed"} err="failed to get container status \"99005acbed1e30f14f0cacc77abf700ff849cbf321aee7573fbc24fc3acbb5ed\": rpc error: code = NotFound desc = could not find container \"99005acbed1e30f14f0cacc77abf700ff849cbf321aee7573fbc24fc3acbb5ed\": container with ID starting with 99005acbed1e30f14f0cacc77abf700ff849cbf321aee7573fbc24fc3acbb5ed not found: ID does not exist" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.944954 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-config-data\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.945048 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.945185 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bce26068-762c-4da5-aa19-cf4c27d56c19-run-httpd\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.945285 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bce26068-762c-4da5-aa19-cf4c27d56c19-log-httpd\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.945699 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.946069 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.946237 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rwfg\" (UniqueName: \"kubernetes.io/projected/bce26068-762c-4da5-aa19-cf4c27d56c19-kube-api-access-2rwfg\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:40 crc kubenswrapper[4877]: I0128 17:04:40.946387 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-scripts\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.048904 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-config-data\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.048963 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.049048 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bce26068-762c-4da5-aa19-cf4c27d56c19-run-httpd\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.049117 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bce26068-762c-4da5-aa19-cf4c27d56c19-log-httpd\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.049191 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 
17:04:41.049294 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.049359 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rwfg\" (UniqueName: \"kubernetes.io/projected/bce26068-762c-4da5-aa19-cf4c27d56c19-kube-api-access-2rwfg\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.049417 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-scripts\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.049642 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bce26068-762c-4da5-aa19-cf4c27d56c19-run-httpd\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.049642 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bce26068-762c-4da5-aa19-cf4c27d56c19-log-httpd\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.053868 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.054383 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.054611 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-scripts\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.055453 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-config-data\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.062741 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.070919 4877 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-2rwfg\" (UniqueName: \"kubernetes.io/projected/bce26068-762c-4da5-aa19-cf4c27d56c19-kube-api-access-2rwfg\") pod \"ceilometer-0\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.183899 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.380650 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25614695-7b60-4b60-8e43-4f20e6c3d44b" path="/var/lib/kubelet/pods/25614695-7b60-4b60-8e43-4f20e6c3d44b/volumes" Jan 28 17:04:41 crc kubenswrapper[4877]: I0128 17:04:41.774966 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:04:42 crc kubenswrapper[4877]: I0128 17:04:42.731093 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bce26068-762c-4da5-aa19-cf4c27d56c19","Type":"ContainerStarted","Data":"efd8471085341bd2888975f79dea337e1f50d9041cd520726407bd81e219fa65"} Jan 28 17:04:42 crc kubenswrapper[4877]: I0128 17:04:42.731460 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bce26068-762c-4da5-aa19-cf4c27d56c19","Type":"ContainerStarted","Data":"03d08429375204d5cb2689cfc0cd62adea00a48b57e0a07c432da0f251d11fe9"} Jan 28 17:04:43 crc kubenswrapper[4877]: I0128 17:04:43.332006 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:04:43 crc kubenswrapper[4877]: E0128 17:04:43.332693 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:04:43 crc kubenswrapper[4877]: I0128 17:04:43.748818 4877 generic.go:334] "Generic (PLEG): container finished" podID="b8e73c54-63fa-450f-8d23-9d566575569e" containerID="01a871d9b7cb7c30eb2fc7c233b32c042067939b85b4157c38ba6228f3d56c86" exitCode=0 Jan 28 17:04:43 crc kubenswrapper[4877]: I0128 17:04:43.749195 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-8jrbg" event={"ID":"b8e73c54-63fa-450f-8d23-9d566575569e","Type":"ContainerDied","Data":"01a871d9b7cb7c30eb2fc7c233b32c042067939b85b4157c38ba6228f3d56c86"} Jan 28 17:04:43 crc kubenswrapper[4877]: I0128 17:04:43.757788 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bce26068-762c-4da5-aa19-cf4c27d56c19","Type":"ContainerStarted","Data":"129a129c1f9621481b00d05607a3aeef040820ce458f6b7114d8f32934d2cc36"} Jan 28 17:04:44 crc kubenswrapper[4877]: I0128 17:04:44.814638 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bce26068-762c-4da5-aa19-cf4c27d56c19","Type":"ContainerStarted","Data":"35eb8e578ef48ad568ce4c632e2516aa822ae878bd180030879b3e4923f11b54"} Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.141297 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.143089 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openstack/nova-api-0" Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.404964 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-8jrbg" Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.583234 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-scripts\") pod \"b8e73c54-63fa-450f-8d23-9d566575569e\" (UID: \"b8e73c54-63fa-450f-8d23-9d566575569e\") " Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.583358 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-combined-ca-bundle\") pod \"b8e73c54-63fa-450f-8d23-9d566575569e\" (UID: \"b8e73c54-63fa-450f-8d23-9d566575569e\") " Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.584234 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-config-data\") pod \"b8e73c54-63fa-450f-8d23-9d566575569e\" (UID: \"b8e73c54-63fa-450f-8d23-9d566575569e\") " Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.584319 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-24l6n\" (UniqueName: \"kubernetes.io/projected/b8e73c54-63fa-450f-8d23-9d566575569e-kube-api-access-24l6n\") pod \"b8e73c54-63fa-450f-8d23-9d566575569e\" (UID: \"b8e73c54-63fa-450f-8d23-9d566575569e\") " Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.589024 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-scripts" (OuterVolumeSpecName: "scripts") pod "b8e73c54-63fa-450f-8d23-9d566575569e" (UID: "b8e73c54-63fa-450f-8d23-9d566575569e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.611946 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8e73c54-63fa-450f-8d23-9d566575569e-kube-api-access-24l6n" (OuterVolumeSpecName: "kube-api-access-24l6n") pod "b8e73c54-63fa-450f-8d23-9d566575569e" (UID: "b8e73c54-63fa-450f-8d23-9d566575569e"). InnerVolumeSpecName "kube-api-access-24l6n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.623212 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b8e73c54-63fa-450f-8d23-9d566575569e" (UID: "b8e73c54-63fa-450f-8d23-9d566575569e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.648723 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-config-data" (OuterVolumeSpecName: "config-data") pod "b8e73c54-63fa-450f-8d23-9d566575569e" (UID: "b8e73c54-63fa-450f-8d23-9d566575569e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.688092 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.688131 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.688141 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8e73c54-63fa-450f-8d23-9d566575569e-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.688150 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-24l6n\" (UniqueName: \"kubernetes.io/projected/b8e73c54-63fa-450f-8d23-9d566575569e-kube-api-access-24l6n\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.826439 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-8jrbg" Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.826428 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-8jrbg" event={"ID":"b8e73c54-63fa-450f-8d23-9d566575569e","Type":"ContainerDied","Data":"97367e1dbc1f8ead218c8241e46e2e0dd112e29e7f23822859227c0f27da90af"} Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.826507 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97367e1dbc1f8ead218c8241e46e2e0dd112e29e7f23822859227c0f27da90af" Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.977631 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.996937 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 17:04:45 crc kubenswrapper[4877]: I0128 17:04:45.997202 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="72716d2a-bddd-4fdf-99ac-6dbca5f41cae" containerName="nova-scheduler-scheduler" containerID="cri-o://3920c10d79d32be642d0c8d41a9eaf96f6afde3eca528e4dd617b9a9192b2ed9" gracePeriod=30 Jan 28 17:04:46 crc kubenswrapper[4877]: I0128 17:04:46.045407 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 17:04:46 crc kubenswrapper[4877]: I0128 17:04:46.045721 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="617dd54a-0eba-4cb2-8108-11abfdc50ce9" containerName="nova-metadata-log" containerID="cri-o://78580d8da4f9e778a21362e0f8fe231391e17f17cd90aa7f0185fcc0384b230e" gracePeriod=30 Jan 28 17:04:46 crc kubenswrapper[4877]: I0128 17:04:46.045777 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="617dd54a-0eba-4cb2-8108-11abfdc50ce9" containerName="nova-metadata-metadata" containerID="cri-o://b34f14fdb2ee6a8fb522d4f3fddf57023f6f701cc02b1a570411f4cd1192e82b" gracePeriod=30 Jan 28 17:04:46 crc kubenswrapper[4877]: I0128 17:04:46.170601 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" 
containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.3:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 17:04:46 crc kubenswrapper[4877]: I0128 17:04:46.170850 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.3:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 17:04:46 crc kubenswrapper[4877]: I0128 17:04:46.479001 4877 scope.go:117] "RemoveContainer" containerID="7efacec95ed943cd67ca1e09e8454f8725b4b6d7395604886f42e58a36c4a64c" Jan 28 17:04:46 crc kubenswrapper[4877]: I0128 17:04:46.511991 4877 scope.go:117] "RemoveContainer" containerID="16783a021d29afc65113e8ae157e172c931d0449e874d38f664cdfd0e5518531" Jan 28 17:04:46 crc kubenswrapper[4877]: I0128 17:04:46.563602 4877 scope.go:117] "RemoveContainer" containerID="cb9005be24322b55c269621c815b4ed453924a096386bfd30bb713d6ac3bad73" Jan 28 17:04:46 crc kubenswrapper[4877]: I0128 17:04:46.844091 4877 generic.go:334] "Generic (PLEG): container finished" podID="617dd54a-0eba-4cb2-8108-11abfdc50ce9" containerID="78580d8da4f9e778a21362e0f8fe231391e17f17cd90aa7f0185fcc0384b230e" exitCode=143 Jan 28 17:04:46 crc kubenswrapper[4877]: I0128 17:04:46.844604 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" containerName="nova-api-log" containerID="cri-o://eb76f4a927a751f3afe3a16afa16391e4021875fda3397490614a9ef35580094" gracePeriod=30 Jan 28 17:04:46 crc kubenswrapper[4877]: I0128 17:04:46.845042 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" containerName="nova-api-api" containerID="cri-o://abf2484a811c1a751f1970991c49d32c5c3b0510b6947260bcc95e576353ae91" gracePeriod=30 Jan 28 17:04:46 crc kubenswrapper[4877]: I0128 17:04:46.845207 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"617dd54a-0eba-4cb2-8108-11abfdc50ce9","Type":"ContainerDied","Data":"78580d8da4f9e778a21362e0f8fe231391e17f17cd90aa7f0185fcc0384b230e"} Jan 28 17:04:47 crc kubenswrapper[4877]: I0128 17:04:47.857201 4877 generic.go:334] "Generic (PLEG): container finished" podID="cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" containerID="eb76f4a927a751f3afe3a16afa16391e4021875fda3397490614a9ef35580094" exitCode=143 Jan 28 17:04:47 crc kubenswrapper[4877]: I0128 17:04:47.857282 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf","Type":"ContainerDied","Data":"eb76f4a927a751f3afe3a16afa16391e4021875fda3397490614a9ef35580094"} Jan 28 17:04:47 crc kubenswrapper[4877]: I0128 17:04:47.859985 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bce26068-762c-4da5-aa19-cf4c27d56c19","Type":"ContainerStarted","Data":"6b2118139046633c8c8b0cbf09e0d8caf1e682c61d33f4b164c84fbefd50c2b4"} Jan 28 17:04:47 crc kubenswrapper[4877]: I0128 17:04:47.860202 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 17:04:47 crc kubenswrapper[4877]: I0128 17:04:47.893724 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.98883792 podStartE2EDuration="7.893703992s" 
podCreationTimestamp="2026-01-28 17:04:40 +0000 UTC" firstStartedPulling="2026-01-28 17:04:41.794049672 +0000 UTC m=+1785.352376560" lastFinishedPulling="2026-01-28 17:04:46.698915744 +0000 UTC m=+1790.257242632" observedRunningTime="2026-01-28 17:04:47.881507555 +0000 UTC m=+1791.439834463" watchObservedRunningTime="2026-01-28 17:04:47.893703992 +0000 UTC m=+1791.452030870" Jan 28 17:04:49 crc kubenswrapper[4877]: E0128 17:04:49.258428 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3920c10d79d32be642d0c8d41a9eaf96f6afde3eca528e4dd617b9a9192b2ed9" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 28 17:04:49 crc kubenswrapper[4877]: E0128 17:04:49.270664 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3920c10d79d32be642d0c8d41a9eaf96f6afde3eca528e4dd617b9a9192b2ed9" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 28 17:04:49 crc kubenswrapper[4877]: E0128 17:04:49.282792 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3920c10d79d32be642d0c8d41a9eaf96f6afde3eca528e4dd617b9a9192b2ed9" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 28 17:04:49 crc kubenswrapper[4877]: E0128 17:04:49.282866 4877 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="72716d2a-bddd-4fdf-99ac-6dbca5f41cae" containerName="nova-scheduler-scheduler" Jan 28 17:04:49 crc kubenswrapper[4877]: I0128 17:04:49.444188 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="617dd54a-0eba-4cb2-8108-11abfdc50ce9" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.250:8775/\": read tcp 10.217.0.2:49270->10.217.0.250:8775: read: connection reset by peer" Jan 28 17:04:49 crc kubenswrapper[4877]: I0128 17:04:49.444282 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="617dd54a-0eba-4cb2-8108-11abfdc50ce9" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.250:8775/\": read tcp 10.217.0.2:49286->10.217.0.250:8775: read: connection reset by peer" Jan 28 17:04:49 crc kubenswrapper[4877]: I0128 17:04:49.909605 4877 generic.go:334] "Generic (PLEG): container finished" podID="617dd54a-0eba-4cb2-8108-11abfdc50ce9" containerID="b34f14fdb2ee6a8fb522d4f3fddf57023f6f701cc02b1a570411f4cd1192e82b" exitCode=0 Jan 28 17:04:49 crc kubenswrapper[4877]: I0128 17:04:49.909657 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"617dd54a-0eba-4cb2-8108-11abfdc50ce9","Type":"ContainerDied","Data":"b34f14fdb2ee6a8fb522d4f3fddf57023f6f701cc02b1a570411f4cd1192e82b"} Jan 28 17:04:50 crc kubenswrapper[4877]: I0128 17:04:50.941743 4877 generic.go:334] "Generic (PLEG): container finished" podID="72716d2a-bddd-4fdf-99ac-6dbca5f41cae" containerID="3920c10d79d32be642d0c8d41a9eaf96f6afde3eca528e4dd617b9a9192b2ed9" exitCode=0 Jan 28 17:04:50 crc kubenswrapper[4877]: I0128 
17:04:50.941961 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"72716d2a-bddd-4fdf-99ac-6dbca5f41cae","Type":"ContainerDied","Data":"3920c10d79d32be642d0c8d41a9eaf96f6afde3eca528e4dd617b9a9192b2ed9"} Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.245236 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.455182 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-nova-metadata-tls-certs\") pod \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.455268 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qhnl2\" (UniqueName: \"kubernetes.io/projected/617dd54a-0eba-4cb2-8108-11abfdc50ce9-kube-api-access-qhnl2\") pod \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.455307 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-config-data\") pod \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.455359 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/617dd54a-0eba-4cb2-8108-11abfdc50ce9-logs\") pod \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.455442 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-combined-ca-bundle\") pod \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\" (UID: \"617dd54a-0eba-4cb2-8108-11abfdc50ce9\") " Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.460102 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/617dd54a-0eba-4cb2-8108-11abfdc50ce9-logs" (OuterVolumeSpecName: "logs") pod "617dd54a-0eba-4cb2-8108-11abfdc50ce9" (UID: "617dd54a-0eba-4cb2-8108-11abfdc50ce9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.489896 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "617dd54a-0eba-4cb2-8108-11abfdc50ce9" (UID: "617dd54a-0eba-4cb2-8108-11abfdc50ce9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.504333 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/617dd54a-0eba-4cb2-8108-11abfdc50ce9-kube-api-access-qhnl2" (OuterVolumeSpecName: "kube-api-access-qhnl2") pod "617dd54a-0eba-4cb2-8108-11abfdc50ce9" (UID: "617dd54a-0eba-4cb2-8108-11abfdc50ce9"). InnerVolumeSpecName "kube-api-access-qhnl2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.504920 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-config-data" (OuterVolumeSpecName: "config-data") pod "617dd54a-0eba-4cb2-8108-11abfdc50ce9" (UID: "617dd54a-0eba-4cb2-8108-11abfdc50ce9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.583900 4877 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/617dd54a-0eba-4cb2-8108-11abfdc50ce9-logs\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.583939 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.583953 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qhnl2\" (UniqueName: \"kubernetes.io/projected/617dd54a-0eba-4cb2-8108-11abfdc50ce9-kube-api-access-qhnl2\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.583961 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.607998 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "617dd54a-0eba-4cb2-8108-11abfdc50ce9" (UID: "617dd54a-0eba-4cb2-8108-11abfdc50ce9"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.686564 4877 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/617dd54a-0eba-4cb2-8108-11abfdc50ce9-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.760175 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.788734 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wdddx\" (UniqueName: \"kubernetes.io/projected/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-kube-api-access-wdddx\") pod \"72716d2a-bddd-4fdf-99ac-6dbca5f41cae\" (UID: \"72716d2a-bddd-4fdf-99ac-6dbca5f41cae\") " Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.789097 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-config-data\") pod \"72716d2a-bddd-4fdf-99ac-6dbca5f41cae\" (UID: \"72716d2a-bddd-4fdf-99ac-6dbca5f41cae\") " Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.789173 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-combined-ca-bundle\") pod \"72716d2a-bddd-4fdf-99ac-6dbca5f41cae\" (UID: \"72716d2a-bddd-4fdf-99ac-6dbca5f41cae\") " Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.815813 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-kube-api-access-wdddx" (OuterVolumeSpecName: "kube-api-access-wdddx") pod "72716d2a-bddd-4fdf-99ac-6dbca5f41cae" (UID: "72716d2a-bddd-4fdf-99ac-6dbca5f41cae"). InnerVolumeSpecName "kube-api-access-wdddx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.824344 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "72716d2a-bddd-4fdf-99ac-6dbca5f41cae" (UID: "72716d2a-bddd-4fdf-99ac-6dbca5f41cae"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.835029 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-config-data" (OuterVolumeSpecName: "config-data") pod "72716d2a-bddd-4fdf-99ac-6dbca5f41cae" (UID: "72716d2a-bddd-4fdf-99ac-6dbca5f41cae"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.892426 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.892464 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.892531 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wdddx\" (UniqueName: \"kubernetes.io/projected/72716d2a-bddd-4fdf-99ac-6dbca5f41cae-kube-api-access-wdddx\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.956403 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"72716d2a-bddd-4fdf-99ac-6dbca5f41cae","Type":"ContainerDied","Data":"d2a2f509bf34254b9d6a0bbcffb462f040aee876e3c9064327e926b6e4bb2682"} Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.956494 4877 scope.go:117] "RemoveContainer" containerID="3920c10d79d32be642d0c8d41a9eaf96f6afde3eca528e4dd617b9a9192b2ed9" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.956510 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.959122 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"617dd54a-0eba-4cb2-8108-11abfdc50ce9","Type":"ContainerDied","Data":"4b763789601210b42716afcd678afc26e9bc7592fafad640d6ef334c2b35dc72"} Jan 28 17:04:51 crc kubenswrapper[4877]: I0128 17:04:51.959283 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.007864 4877 scope.go:117] "RemoveContainer" containerID="b34f14fdb2ee6a8fb522d4f3fddf57023f6f701cc02b1a570411f4cd1192e82b" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.014191 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.032528 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.050027 4877 scope.go:117] "RemoveContainer" containerID="78580d8da4f9e778a21362e0f8fe231391e17f17cd90aa7f0185fcc0384b230e" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.052684 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.069607 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.082359 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 28 17:04:52 crc kubenswrapper[4877]: E0128 17:04:52.083393 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8e73c54-63fa-450f-8d23-9d566575569e" containerName="nova-manage" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.083427 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8e73c54-63fa-450f-8d23-9d566575569e" containerName="nova-manage" Jan 28 17:04:52 crc kubenswrapper[4877]: E0128 17:04:52.083507 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="617dd54a-0eba-4cb2-8108-11abfdc50ce9" containerName="nova-metadata-metadata" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.083517 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="617dd54a-0eba-4cb2-8108-11abfdc50ce9" containerName="nova-metadata-metadata" Jan 28 17:04:52 crc kubenswrapper[4877]: E0128 17:04:52.083545 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72716d2a-bddd-4fdf-99ac-6dbca5f41cae" containerName="nova-scheduler-scheduler" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.083555 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="72716d2a-bddd-4fdf-99ac-6dbca5f41cae" containerName="nova-scheduler-scheduler" Jan 28 17:04:52 crc kubenswrapper[4877]: E0128 17:04:52.083599 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="617dd54a-0eba-4cb2-8108-11abfdc50ce9" containerName="nova-metadata-log" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.083609 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="617dd54a-0eba-4cb2-8108-11abfdc50ce9" containerName="nova-metadata-log" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.083952 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="72716d2a-bddd-4fdf-99ac-6dbca5f41cae" containerName="nova-scheduler-scheduler" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.083979 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="617dd54a-0eba-4cb2-8108-11abfdc50ce9" containerName="nova-metadata-metadata" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.083999 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="617dd54a-0eba-4cb2-8108-11abfdc50ce9" containerName="nova-metadata-log" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.084016 4877 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="b8e73c54-63fa-450f-8d23-9d566575569e" containerName="nova-manage" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.086176 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.093620 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.095637 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.102208 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.104373 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.108222 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.119308 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.137110 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.202457 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44962a55-73c9-4777-a483-b1b9d15c74aa-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"44962a55-73c9-4777-a483-b1b9d15c74aa\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.202617 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjb72\" (UniqueName: \"kubernetes.io/projected/44962a55-73c9-4777-a483-b1b9d15c74aa-kube-api-access-bjb72\") pod \"nova-scheduler-0\" (UID: \"44962a55-73c9-4777-a483-b1b9d15c74aa\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.202655 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5303fc4-f7b0-4c56-b78c-e4e71fa34936-logs\") pod \"nova-metadata-0\" (UID: \"f5303fc4-f7b0-4c56-b78c-e4e71fa34936\") " pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.202716 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fm2jq\" (UniqueName: \"kubernetes.io/projected/f5303fc4-f7b0-4c56-b78c-e4e71fa34936-kube-api-access-fm2jq\") pod \"nova-metadata-0\" (UID: \"f5303fc4-f7b0-4c56-b78c-e4e71fa34936\") " pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.202741 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5303fc4-f7b0-4c56-b78c-e4e71fa34936-config-data\") pod \"nova-metadata-0\" (UID: \"f5303fc4-f7b0-4c56-b78c-e4e71fa34936\") " pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.202763 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f5303fc4-f7b0-4c56-b78c-e4e71fa34936-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f5303fc4-f7b0-4c56-b78c-e4e71fa34936\") " pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.202936 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5303fc4-f7b0-4c56-b78c-e4e71fa34936-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f5303fc4-f7b0-4c56-b78c-e4e71fa34936\") " pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.202962 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44962a55-73c9-4777-a483-b1b9d15c74aa-config-data\") pod \"nova-scheduler-0\" (UID: \"44962a55-73c9-4777-a483-b1b9d15c74aa\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.304975 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44962a55-73c9-4777-a483-b1b9d15c74aa-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"44962a55-73c9-4777-a483-b1b9d15c74aa\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.305233 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjb72\" (UniqueName: \"kubernetes.io/projected/44962a55-73c9-4777-a483-b1b9d15c74aa-kube-api-access-bjb72\") pod \"nova-scheduler-0\" (UID: \"44962a55-73c9-4777-a483-b1b9d15c74aa\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.305298 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5303fc4-f7b0-4c56-b78c-e4e71fa34936-logs\") pod \"nova-metadata-0\" (UID: \"f5303fc4-f7b0-4c56-b78c-e4e71fa34936\") " pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.305351 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fm2jq\" (UniqueName: \"kubernetes.io/projected/f5303fc4-f7b0-4c56-b78c-e4e71fa34936-kube-api-access-fm2jq\") pod \"nova-metadata-0\" (UID: \"f5303fc4-f7b0-4c56-b78c-e4e71fa34936\") " pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.305395 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5303fc4-f7b0-4c56-b78c-e4e71fa34936-config-data\") pod \"nova-metadata-0\" (UID: \"f5303fc4-f7b0-4c56-b78c-e4e71fa34936\") " pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.305419 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5303fc4-f7b0-4c56-b78c-e4e71fa34936-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f5303fc4-f7b0-4c56-b78c-e4e71fa34936\") " pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.305589 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5303fc4-f7b0-4c56-b78c-e4e71fa34936-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f5303fc4-f7b0-4c56-b78c-e4e71fa34936\") " pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc 
kubenswrapper[4877]: I0128 17:04:52.305621 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44962a55-73c9-4777-a483-b1b9d15c74aa-config-data\") pod \"nova-scheduler-0\" (UID: \"44962a55-73c9-4777-a483-b1b9d15c74aa\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.309699 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44962a55-73c9-4777-a483-b1b9d15c74aa-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"44962a55-73c9-4777-a483-b1b9d15c74aa\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.312885 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44962a55-73c9-4777-a483-b1b9d15c74aa-config-data\") pod \"nova-scheduler-0\" (UID: \"44962a55-73c9-4777-a483-b1b9d15c74aa\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.325013 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjb72\" (UniqueName: \"kubernetes.io/projected/44962a55-73c9-4777-a483-b1b9d15c74aa-kube-api-access-bjb72\") pod \"nova-scheduler-0\" (UID: \"44962a55-73c9-4777-a483-b1b9d15c74aa\") " pod="openstack/nova-scheduler-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.440651 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5303fc4-f7b0-4c56-b78c-e4e71fa34936-logs\") pod \"nova-metadata-0\" (UID: \"f5303fc4-f7b0-4c56-b78c-e4e71fa34936\") " pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.440998 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5303fc4-f7b0-4c56-b78c-e4e71fa34936-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f5303fc4-f7b0-4c56-b78c-e4e71fa34936\") " pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.441108 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5303fc4-f7b0-4c56-b78c-e4e71fa34936-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f5303fc4-f7b0-4c56-b78c-e4e71fa34936\") " pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.441234 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5303fc4-f7b0-4c56-b78c-e4e71fa34936-config-data\") pod \"nova-metadata-0\" (UID: \"f5303fc4-f7b0-4c56-b78c-e4e71fa34936\") " pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.443981 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fm2jq\" (UniqueName: \"kubernetes.io/projected/f5303fc4-f7b0-4c56-b78c-e4e71fa34936-kube-api-access-fm2jq\") pod \"nova-metadata-0\" (UID: \"f5303fc4-f7b0-4c56-b78c-e4e71fa34936\") " pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.488072 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 17:04:52 crc kubenswrapper[4877]: I0128 17:04:52.502069 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.010559 4877 generic.go:334] "Generic (PLEG): container finished" podID="cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" containerID="abf2484a811c1a751f1970991c49d32c5c3b0510b6947260bcc95e576353ae91" exitCode=0 Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.010649 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf","Type":"ContainerDied","Data":"abf2484a811c1a751f1970991c49d32c5c3b0510b6947260bcc95e576353ae91"} Jan 28 17:04:53 crc kubenswrapper[4877]: W0128 17:04:53.039300 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5303fc4_f7b0_4c56_b78c_e4e71fa34936.slice/crio-4124180bd033e37c0361738e8bf9936f57d8e939225ec589ff27762e05d94e02 WatchSource:0}: Error finding container 4124180bd033e37c0361738e8bf9936f57d8e939225ec589ff27762e05d94e02: Status 404 returned error can't find the container with id 4124180bd033e37c0361738e8bf9936f57d8e939225ec589ff27762e05d94e02 Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.041508 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.248921 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 17:04:53 crc kubenswrapper[4877]: W0128 17:04:53.262711 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod44962a55_73c9_4777_a483_b1b9d15c74aa.slice/crio-78bf8c6a1ec6fef46dc3507ab4595388606e153badb249c6903f58d1f188fb32 WatchSource:0}: Error finding container 78bf8c6a1ec6fef46dc3507ab4595388606e153badb249c6903f58d1f188fb32: Status 404 returned error can't find the container with id 78bf8c6a1ec6fef46dc3507ab4595388606e153badb249c6903f58d1f188fb32 Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.348957 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="617dd54a-0eba-4cb2-8108-11abfdc50ce9" path="/var/lib/kubelet/pods/617dd54a-0eba-4cb2-8108-11abfdc50ce9/volumes" Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.350064 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72716d2a-bddd-4fdf-99ac-6dbca5f41cae" path="/var/lib/kubelet/pods/72716d2a-bddd-4fdf-99ac-6dbca5f41cae/volumes" Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.419613 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.431541 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxh8h\" (UniqueName: \"kubernetes.io/projected/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-kube-api-access-vxh8h\") pod \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.431767 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-config-data\") pod \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.431916 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-combined-ca-bundle\") pod \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.432181 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-public-tls-certs\") pod \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.432293 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-internal-tls-certs\") pod \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.432434 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-logs\") pod \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\" (UID: \"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf\") " Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.436043 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-logs" (OuterVolumeSpecName: "logs") pod "cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" (UID: "cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.444380 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-kube-api-access-vxh8h" (OuterVolumeSpecName: "kube-api-access-vxh8h") pod "cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" (UID: "cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf"). InnerVolumeSpecName "kube-api-access-vxh8h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.496430 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-config-data" (OuterVolumeSpecName: "config-data") pod "cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" (UID: "cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.498925 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" (UID: "cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.536106 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.536146 4877 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-logs\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.536169 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxh8h\" (UniqueName: \"kubernetes.io/projected/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-kube-api-access-vxh8h\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.536184 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.554661 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" (UID: "cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.570778 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" (UID: "cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.638510 4877 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:53 crc kubenswrapper[4877]: I0128 17:04:53.638558 4877 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.026162 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.026166 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf","Type":"ContainerDied","Data":"1e99f7ed46e6835b5628dd42a72b0432a03bb6151c5fce13922ae495a79b4071"} Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.027363 4877 scope.go:117] "RemoveContainer" containerID="abf2484a811c1a751f1970991c49d32c5c3b0510b6947260bcc95e576353ae91" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.036282 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"44962a55-73c9-4777-a483-b1b9d15c74aa","Type":"ContainerStarted","Data":"e81e38f971f0b508c06f8c342903ab570a3b694df3b136441cb092d8c9e41684"} Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.036349 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"44962a55-73c9-4777-a483-b1b9d15c74aa","Type":"ContainerStarted","Data":"78bf8c6a1ec6fef46dc3507ab4595388606e153badb249c6903f58d1f188fb32"} Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.040374 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f5303fc4-f7b0-4c56-b78c-e4e71fa34936","Type":"ContainerStarted","Data":"f8d84a91ab8ad245134932311532252761369e201e95539994d8b13e72374e75"} Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.040449 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f5303fc4-f7b0-4c56-b78c-e4e71fa34936","Type":"ContainerStarted","Data":"4124180bd033e37c0361738e8bf9936f57d8e939225ec589ff27762e05d94e02"} Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.058925 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.058905868 podStartE2EDuration="2.058905868s" podCreationTimestamp="2026-01-28 17:04:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:04:54.054197271 +0000 UTC m=+1797.612524209" watchObservedRunningTime="2026-01-28 17:04:54.058905868 +0000 UTC m=+1797.617232756" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.069673 4877 scope.go:117] "RemoveContainer" containerID="eb76f4a927a751f3afe3a16afa16391e4021875fda3397490614a9ef35580094" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.134875 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.176086 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.192388 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:54 crc kubenswrapper[4877]: E0128 17:04:54.193130 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" containerName="nova-api-api" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.193156 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" containerName="nova-api-api" Jan 28 17:04:54 crc kubenswrapper[4877]: E0128 17:04:54.193170 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" containerName="nova-api-log" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 
17:04:54.193178 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" containerName="nova-api-log" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.193545 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" containerName="nova-api-log" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.193593 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" containerName="nova-api-api" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.195294 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.199614 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.199732 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.200579 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.222763 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.253968 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9930248-62c1-488f-8df3-81a2bb05c721-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.254075 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9930248-62c1-488f-8df3-81a2bb05c721-public-tls-certs\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.254295 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xgkh\" (UniqueName: \"kubernetes.io/projected/a9930248-62c1-488f-8df3-81a2bb05c721-kube-api-access-4xgkh\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.254587 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9930248-62c1-488f-8df3-81a2bb05c721-config-data\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.254743 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9930248-62c1-488f-8df3-81a2bb05c721-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.254909 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9930248-62c1-488f-8df3-81a2bb05c721-logs\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " 
pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.357236 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9930248-62c1-488f-8df3-81a2bb05c721-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.357358 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9930248-62c1-488f-8df3-81a2bb05c721-logs\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.357433 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9930248-62c1-488f-8df3-81a2bb05c721-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.357505 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9930248-62c1-488f-8df3-81a2bb05c721-public-tls-certs\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.357553 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xgkh\" (UniqueName: \"kubernetes.io/projected/a9930248-62c1-488f-8df3-81a2bb05c721-kube-api-access-4xgkh\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.357635 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9930248-62c1-488f-8df3-81a2bb05c721-config-data\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.358996 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9930248-62c1-488f-8df3-81a2bb05c721-logs\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.362079 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9930248-62c1-488f-8df3-81a2bb05c721-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.364075 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9930248-62c1-488f-8df3-81a2bb05c721-public-tls-certs\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.370779 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9930248-62c1-488f-8df3-81a2bb05c721-config-data\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 
17:04:54.374871 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9930248-62c1-488f-8df3-81a2bb05c721-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.377624 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xgkh\" (UniqueName: \"kubernetes.io/projected/a9930248-62c1-488f-8df3-81a2bb05c721-kube-api-access-4xgkh\") pod \"nova-api-0\" (UID: \"a9930248-62c1-488f-8df3-81a2bb05c721\") " pod="openstack/nova-api-0" Jan 28 17:04:54 crc kubenswrapper[4877]: I0128 17:04:54.525619 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 28 17:04:55 crc kubenswrapper[4877]: I0128 17:04:55.056406 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f5303fc4-f7b0-4c56-b78c-e4e71fa34936","Type":"ContainerStarted","Data":"3a9513818c945ab2aa68307f4b5dee1b2f084b0197368d1cef363a43402c9226"} Jan 28 17:04:55 crc kubenswrapper[4877]: I0128 17:04:55.060211 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 17:04:55 crc kubenswrapper[4877]: W0128 17:04:55.077753 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda9930248_62c1_488f_8df3_81a2bb05c721.slice/crio-135893ca05e038bc97b33fd8f5921544320a0f0e69c3a52c680921bb9e72a544 WatchSource:0}: Error finding container 135893ca05e038bc97b33fd8f5921544320a0f0e69c3a52c680921bb9e72a544: Status 404 returned error can't find the container with id 135893ca05e038bc97b33fd8f5921544320a0f0e69c3a52c680921bb9e72a544 Jan 28 17:04:55 crc kubenswrapper[4877]: I0128 17:04:55.084060 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.084044589 podStartE2EDuration="3.084044589s" podCreationTimestamp="2026-01-28 17:04:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:04:55.079681152 +0000 UTC m=+1798.638008030" watchObservedRunningTime="2026-01-28 17:04:55.084044589 +0000 UTC m=+1798.642371477" Jan 28 17:04:55 crc kubenswrapper[4877]: I0128 17:04:55.351094 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf" path="/var/lib/kubelet/pods/cd0ca209-cd3e-4a55-8c92-fd7eda0e8cbf/volumes" Jan 28 17:04:56 crc kubenswrapper[4877]: I0128 17:04:56.072163 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a9930248-62c1-488f-8df3-81a2bb05c721","Type":"ContainerStarted","Data":"81a5a8b5ba46893e47dafe5fe3939fc35e3ee2360a4c975e9a24adb58e72ce2d"} Jan 28 17:04:56 crc kubenswrapper[4877]: I0128 17:04:56.072500 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a9930248-62c1-488f-8df3-81a2bb05c721","Type":"ContainerStarted","Data":"c86260f63db9d2e69442b191c06839d471f0a368b9e82cddce0e7e0a809ea568"} Jan 28 17:04:56 crc kubenswrapper[4877]: I0128 17:04:56.072516 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a9930248-62c1-488f-8df3-81a2bb05c721","Type":"ContainerStarted","Data":"135893ca05e038bc97b33fd8f5921544320a0f0e69c3a52c680921bb9e72a544"} Jan 28 17:04:56 crc 
kubenswrapper[4877]: I0128 17:04:56.102846 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.102454559 podStartE2EDuration="2.102454559s" podCreationTimestamp="2026-01-28 17:04:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:04:56.088627567 +0000 UTC m=+1799.646954465" watchObservedRunningTime="2026-01-28 17:04:56.102454559 +0000 UTC m=+1799.660781447" Jan 28 17:04:57 crc kubenswrapper[4877]: I0128 17:04:57.369757 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:04:57 crc kubenswrapper[4877]: E0128 17:04:57.370376 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:04:57 crc kubenswrapper[4877]: I0128 17:04:57.488344 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 28 17:04:57 crc kubenswrapper[4877]: I0128 17:04:57.488408 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 28 17:04:57 crc kubenswrapper[4877]: I0128 17:04:57.502888 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 28 17:05:02 crc kubenswrapper[4877]: I0128 17:05:02.488241 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 28 17:05:02 crc kubenswrapper[4877]: I0128 17:05:02.488840 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 28 17:05:02 crc kubenswrapper[4877]: I0128 17:05:02.502843 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 28 17:05:02 crc kubenswrapper[4877]: I0128 17:05:02.537072 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 28 17:05:03 crc kubenswrapper[4877]: I0128 17:05:03.186110 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 28 17:05:03 crc kubenswrapper[4877]: I0128 17:05:03.501717 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="f5303fc4-f7b0-4c56-b78c-e4e71fa34936" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.6:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 17:05:03 crc kubenswrapper[4877]: I0128 17:05:03.501794 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="f5303fc4-f7b0-4c56-b78c-e4e71fa34936" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.6:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 17:05:04 crc kubenswrapper[4877]: I0128 17:05:04.526278 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 28 17:05:04 crc kubenswrapper[4877]: I0128 17:05:04.526542 4877 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 28 17:05:05 crc kubenswrapper[4877]: I0128 17:05:05.539750 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a9930248-62c1-488f-8df3-81a2bb05c721" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.8:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 17:05:05 crc kubenswrapper[4877]: I0128 17:05:05.539908 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a9930248-62c1-488f-8df3-81a2bb05c721" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.8:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 17:05:05 crc kubenswrapper[4877]: I0128 17:05:05.854890 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fsrk2"] Jan 28 17:05:05 crc kubenswrapper[4877]: I0128 17:05:05.862535 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fsrk2" Jan 28 17:05:05 crc kubenswrapper[4877]: I0128 17:05:05.894432 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fsrk2"] Jan 28 17:05:05 crc kubenswrapper[4877]: I0128 17:05:05.983106 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5954741-e854-495a-9122-509fcfa1ec6c-catalog-content\") pod \"redhat-operators-fsrk2\" (UID: \"e5954741-e854-495a-9122-509fcfa1ec6c\") " pod="openshift-marketplace/redhat-operators-fsrk2" Jan 28 17:05:05 crc kubenswrapper[4877]: I0128 17:05:05.983208 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sskq9\" (UniqueName: \"kubernetes.io/projected/e5954741-e854-495a-9122-509fcfa1ec6c-kube-api-access-sskq9\") pod \"redhat-operators-fsrk2\" (UID: \"e5954741-e854-495a-9122-509fcfa1ec6c\") " pod="openshift-marketplace/redhat-operators-fsrk2" Jan 28 17:05:05 crc kubenswrapper[4877]: I0128 17:05:05.983256 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5954741-e854-495a-9122-509fcfa1ec6c-utilities\") pod \"redhat-operators-fsrk2\" (UID: \"e5954741-e854-495a-9122-509fcfa1ec6c\") " pod="openshift-marketplace/redhat-operators-fsrk2" Jan 28 17:05:06 crc kubenswrapper[4877]: I0128 17:05:06.086086 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5954741-e854-495a-9122-509fcfa1ec6c-utilities\") pod \"redhat-operators-fsrk2\" (UID: \"e5954741-e854-495a-9122-509fcfa1ec6c\") " pod="openshift-marketplace/redhat-operators-fsrk2" Jan 28 17:05:06 crc kubenswrapper[4877]: I0128 17:05:06.086573 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5954741-e854-495a-9122-509fcfa1ec6c-catalog-content\") pod \"redhat-operators-fsrk2\" (UID: \"e5954741-e854-495a-9122-509fcfa1ec6c\") " pod="openshift-marketplace/redhat-operators-fsrk2" Jan 28 17:05:06 crc kubenswrapper[4877]: I0128 17:05:06.086704 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sskq9\" (UniqueName: 
\"kubernetes.io/projected/e5954741-e854-495a-9122-509fcfa1ec6c-kube-api-access-sskq9\") pod \"redhat-operators-fsrk2\" (UID: \"e5954741-e854-495a-9122-509fcfa1ec6c\") " pod="openshift-marketplace/redhat-operators-fsrk2" Jan 28 17:05:06 crc kubenswrapper[4877]: I0128 17:05:06.086892 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5954741-e854-495a-9122-509fcfa1ec6c-utilities\") pod \"redhat-operators-fsrk2\" (UID: \"e5954741-e854-495a-9122-509fcfa1ec6c\") " pod="openshift-marketplace/redhat-operators-fsrk2" Jan 28 17:05:06 crc kubenswrapper[4877]: I0128 17:05:06.087979 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5954741-e854-495a-9122-509fcfa1ec6c-catalog-content\") pod \"redhat-operators-fsrk2\" (UID: \"e5954741-e854-495a-9122-509fcfa1ec6c\") " pod="openshift-marketplace/redhat-operators-fsrk2" Jan 28 17:05:06 crc kubenswrapper[4877]: I0128 17:05:06.110364 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sskq9\" (UniqueName: \"kubernetes.io/projected/e5954741-e854-495a-9122-509fcfa1ec6c-kube-api-access-sskq9\") pod \"redhat-operators-fsrk2\" (UID: \"e5954741-e854-495a-9122-509fcfa1ec6c\") " pod="openshift-marketplace/redhat-operators-fsrk2" Jan 28 17:05:06 crc kubenswrapper[4877]: I0128 17:05:06.189030 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fsrk2" Jan 28 17:05:06 crc kubenswrapper[4877]: W0128 17:05:06.659815 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode5954741_e854_495a_9122_509fcfa1ec6c.slice/crio-4a587856150e6046100be0b70885ba76216cea65928c0ff68b42fa2bf2fe0d76 WatchSource:0}: Error finding container 4a587856150e6046100be0b70885ba76216cea65928c0ff68b42fa2bf2fe0d76: Status 404 returned error can't find the container with id 4a587856150e6046100be0b70885ba76216cea65928c0ff68b42fa2bf2fe0d76 Jan 28 17:05:06 crc kubenswrapper[4877]: I0128 17:05:06.660074 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fsrk2"] Jan 28 17:05:07 crc kubenswrapper[4877]: I0128 17:05:07.200998 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fsrk2" event={"ID":"e5954741-e854-495a-9122-509fcfa1ec6c","Type":"ContainerStarted","Data":"4a587856150e6046100be0b70885ba76216cea65928c0ff68b42fa2bf2fe0d76"} Jan 28 17:05:08 crc kubenswrapper[4877]: I0128 17:05:08.204746 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wxcqh"] Jan 28 17:05:08 crc kubenswrapper[4877]: I0128 17:05:08.209076 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wxcqh" Jan 28 17:05:08 crc kubenswrapper[4877]: I0128 17:05:08.219722 4877 generic.go:334] "Generic (PLEG): container finished" podID="e5954741-e854-495a-9122-509fcfa1ec6c" containerID="7f378fdbd5bd05475089a1f397195fa927db19a5c9e1a622be1d17dd39bcb690" exitCode=0 Jan 28 17:05:08 crc kubenswrapper[4877]: I0128 17:05:08.219778 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fsrk2" event={"ID":"e5954741-e854-495a-9122-509fcfa1ec6c","Type":"ContainerDied","Data":"7f378fdbd5bd05475089a1f397195fa927db19a5c9e1a622be1d17dd39bcb690"} Jan 28 17:05:08 crc kubenswrapper[4877]: I0128 17:05:08.223280 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wxcqh"] Jan 28 17:05:08 crc kubenswrapper[4877]: I0128 17:05:08.362441 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd412c38-2df2-4881-8293-4866583158c8-catalog-content\") pod \"community-operators-wxcqh\" (UID: \"dd412c38-2df2-4881-8293-4866583158c8\") " pod="openshift-marketplace/community-operators-wxcqh" Jan 28 17:05:08 crc kubenswrapper[4877]: I0128 17:05:08.363046 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ss6p5\" (UniqueName: \"kubernetes.io/projected/dd412c38-2df2-4881-8293-4866583158c8-kube-api-access-ss6p5\") pod \"community-operators-wxcqh\" (UID: \"dd412c38-2df2-4881-8293-4866583158c8\") " pod="openshift-marketplace/community-operators-wxcqh" Jan 28 17:05:08 crc kubenswrapper[4877]: I0128 17:05:08.363365 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd412c38-2df2-4881-8293-4866583158c8-utilities\") pod \"community-operators-wxcqh\" (UID: \"dd412c38-2df2-4881-8293-4866583158c8\") " pod="openshift-marketplace/community-operators-wxcqh" Jan 28 17:05:08 crc kubenswrapper[4877]: I0128 17:05:08.467922 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ss6p5\" (UniqueName: \"kubernetes.io/projected/dd412c38-2df2-4881-8293-4866583158c8-kube-api-access-ss6p5\") pod \"community-operators-wxcqh\" (UID: \"dd412c38-2df2-4881-8293-4866583158c8\") " pod="openshift-marketplace/community-operators-wxcqh" Jan 28 17:05:08 crc kubenswrapper[4877]: I0128 17:05:08.468135 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd412c38-2df2-4881-8293-4866583158c8-utilities\") pod \"community-operators-wxcqh\" (UID: \"dd412c38-2df2-4881-8293-4866583158c8\") " pod="openshift-marketplace/community-operators-wxcqh" Jan 28 17:05:08 crc kubenswrapper[4877]: I0128 17:05:08.468375 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd412c38-2df2-4881-8293-4866583158c8-catalog-content\") pod \"community-operators-wxcqh\" (UID: \"dd412c38-2df2-4881-8293-4866583158c8\") " pod="openshift-marketplace/community-operators-wxcqh" Jan 28 17:05:08 crc kubenswrapper[4877]: I0128 17:05:08.468568 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd412c38-2df2-4881-8293-4866583158c8-utilities\") pod \"community-operators-wxcqh\" (UID: 
\"dd412c38-2df2-4881-8293-4866583158c8\") " pod="openshift-marketplace/community-operators-wxcqh" Jan 28 17:05:08 crc kubenswrapper[4877]: I0128 17:05:08.468915 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd412c38-2df2-4881-8293-4866583158c8-catalog-content\") pod \"community-operators-wxcqh\" (UID: \"dd412c38-2df2-4881-8293-4866583158c8\") " pod="openshift-marketplace/community-operators-wxcqh" Jan 28 17:05:08 crc kubenswrapper[4877]: I0128 17:05:08.488993 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ss6p5\" (UniqueName: \"kubernetes.io/projected/dd412c38-2df2-4881-8293-4866583158c8-kube-api-access-ss6p5\") pod \"community-operators-wxcqh\" (UID: \"dd412c38-2df2-4881-8293-4866583158c8\") " pod="openshift-marketplace/community-operators-wxcqh" Jan 28 17:05:08 crc kubenswrapper[4877]: I0128 17:05:08.566732 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wxcqh" Jan 28 17:05:09 crc kubenswrapper[4877]: I0128 17:05:09.125994 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wxcqh"] Jan 28 17:05:09 crc kubenswrapper[4877]: W0128 17:05:09.127676 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd412c38_2df2_4881_8293_4866583158c8.slice/crio-80f6b3fabd7081f0eab8dcf583397ef377c566cb803d1d07c132ce9bdef6949e WatchSource:0}: Error finding container 80f6b3fabd7081f0eab8dcf583397ef377c566cb803d1d07c132ce9bdef6949e: Status 404 returned error can't find the container with id 80f6b3fabd7081f0eab8dcf583397ef377c566cb803d1d07c132ce9bdef6949e Jan 28 17:05:09 crc kubenswrapper[4877]: I0128 17:05:09.233432 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxcqh" event={"ID":"dd412c38-2df2-4881-8293-4866583158c8","Type":"ContainerStarted","Data":"80f6b3fabd7081f0eab8dcf583397ef377c566cb803d1d07c132ce9bdef6949e"} Jan 28 17:05:09 crc kubenswrapper[4877]: I0128 17:05:09.330884 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:05:09 crc kubenswrapper[4877]: E0128 17:05:09.331198 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:05:10 crc kubenswrapper[4877]: I0128 17:05:10.255278 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fsrk2" event={"ID":"e5954741-e854-495a-9122-509fcfa1ec6c","Type":"ContainerStarted","Data":"c1fabd10a246823b9ac3c710827b5ee5a99d881960131e15933f7dee66569882"} Jan 28 17:05:10 crc kubenswrapper[4877]: I0128 17:05:10.259629 4877 generic.go:334] "Generic (PLEG): container finished" podID="dd412c38-2df2-4881-8293-4866583158c8" containerID="2c3c6004478354312f47a09f3b737bc4b48c3a48e4bd473269a3fe29cb695ebf" exitCode=0 Jan 28 17:05:10 crc kubenswrapper[4877]: I0128 17:05:10.259831 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxcqh" 
event={"ID":"dd412c38-2df2-4881-8293-4866583158c8","Type":"ContainerDied","Data":"2c3c6004478354312f47a09f3b737bc4b48c3a48e4bd473269a3fe29cb695ebf"} Jan 28 17:05:11 crc kubenswrapper[4877]: I0128 17:05:11.477895 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 28 17:05:12 crc kubenswrapper[4877]: I0128 17:05:12.495528 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 28 17:05:12 crc kubenswrapper[4877]: I0128 17:05:12.498587 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 28 17:05:12 crc kubenswrapper[4877]: I0128 17:05:12.507450 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 28 17:05:13 crc kubenswrapper[4877]: I0128 17:05:13.306804 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 28 17:05:14 crc kubenswrapper[4877]: I0128 17:05:14.314843 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxcqh" event={"ID":"dd412c38-2df2-4881-8293-4866583158c8","Type":"ContainerStarted","Data":"024077c7f5f6c23ba15d071b3cfa4c8efba5c044dc78a056721d598ebe6335ee"} Jan 28 17:05:14 crc kubenswrapper[4877]: I0128 17:05:14.532823 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 28 17:05:14 crc kubenswrapper[4877]: I0128 17:05:14.533271 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 28 17:05:14 crc kubenswrapper[4877]: I0128 17:05:14.541644 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 28 17:05:14 crc kubenswrapper[4877]: I0128 17:05:14.560046 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 28 17:05:15 crc kubenswrapper[4877]: I0128 17:05:15.327533 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 28 17:05:15 crc kubenswrapper[4877]: I0128 17:05:15.345792 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 28 17:05:20 crc kubenswrapper[4877]: I0128 17:05:20.330915 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:05:20 crc kubenswrapper[4877]: E0128 17:05:20.331801 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:05:26 crc kubenswrapper[4877]: I0128 17:05:26.852919 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-pbjv2"] Jan 28 17:05:26 crc kubenswrapper[4877]: I0128 17:05:26.871716 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-pbjv2"] Jan 28 17:05:26 crc kubenswrapper[4877]: I0128 17:05:26.887240 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-t55qb"] Jan 28 17:05:26 crc kubenswrapper[4877]: I0128 17:05:26.891086 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-t55qb" Jan 28 17:05:26 crc kubenswrapper[4877]: I0128 17:05:26.913755 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-t55qb"] Jan 28 17:05:26 crc kubenswrapper[4877]: I0128 17:05:26.962916 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-gtz9w"] Jan 28 17:05:26 crc kubenswrapper[4877]: I0128 17:05:26.986458 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-gtz9w"] Jan 28 17:05:27 crc kubenswrapper[4877]: I0128 17:05:27.003585 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vx5cg\" (UniqueName: \"kubernetes.io/projected/4b5478d7-63fb-41e2-89c8-1d27290a9844-kube-api-access-vx5cg\") pod \"heat-db-sync-t55qb\" (UID: \"4b5478d7-63fb-41e2-89c8-1d27290a9844\") " pod="openstack/heat-db-sync-t55qb" Jan 28 17:05:27 crc kubenswrapper[4877]: I0128 17:05:27.003810 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b5478d7-63fb-41e2-89c8-1d27290a9844-combined-ca-bundle\") pod \"heat-db-sync-t55qb\" (UID: \"4b5478d7-63fb-41e2-89c8-1d27290a9844\") " pod="openstack/heat-db-sync-t55qb" Jan 28 17:05:27 crc kubenswrapper[4877]: I0128 17:05:27.004448 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b5478d7-63fb-41e2-89c8-1d27290a9844-config-data\") pod \"heat-db-sync-t55qb\" (UID: \"4b5478d7-63fb-41e2-89c8-1d27290a9844\") " pod="openstack/heat-db-sync-t55qb" Jan 28 17:05:27 crc kubenswrapper[4877]: I0128 17:05:27.106853 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b5478d7-63fb-41e2-89c8-1d27290a9844-combined-ca-bundle\") pod \"heat-db-sync-t55qb\" (UID: \"4b5478d7-63fb-41e2-89c8-1d27290a9844\") " pod="openstack/heat-db-sync-t55qb" Jan 28 17:05:27 crc kubenswrapper[4877]: I0128 17:05:27.107040 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b5478d7-63fb-41e2-89c8-1d27290a9844-config-data\") pod \"heat-db-sync-t55qb\" (UID: \"4b5478d7-63fb-41e2-89c8-1d27290a9844\") " pod="openstack/heat-db-sync-t55qb" Jan 28 17:05:27 crc kubenswrapper[4877]: I0128 17:05:27.107102 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vx5cg\" (UniqueName: \"kubernetes.io/projected/4b5478d7-63fb-41e2-89c8-1d27290a9844-kube-api-access-vx5cg\") pod \"heat-db-sync-t55qb\" (UID: \"4b5478d7-63fb-41e2-89c8-1d27290a9844\") " pod="openstack/heat-db-sync-t55qb" Jan 28 17:05:27 crc kubenswrapper[4877]: I0128 17:05:27.114244 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b5478d7-63fb-41e2-89c8-1d27290a9844-config-data\") pod \"heat-db-sync-t55qb\" (UID: \"4b5478d7-63fb-41e2-89c8-1d27290a9844\") " pod="openstack/heat-db-sync-t55qb" Jan 28 17:05:27 crc kubenswrapper[4877]: I0128 17:05:27.114788 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b5478d7-63fb-41e2-89c8-1d27290a9844-combined-ca-bundle\") pod \"heat-db-sync-t55qb\" (UID: \"4b5478d7-63fb-41e2-89c8-1d27290a9844\") " pod="openstack/heat-db-sync-t55qb" Jan 28 17:05:27 crc 
kubenswrapper[4877]: I0128 17:05:27.129868 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vx5cg\" (UniqueName: \"kubernetes.io/projected/4b5478d7-63fb-41e2-89c8-1d27290a9844-kube-api-access-vx5cg\") pod \"heat-db-sync-t55qb\" (UID: \"4b5478d7-63fb-41e2-89c8-1d27290a9844\") " pod="openstack/heat-db-sync-t55qb" Jan 28 17:05:27 crc kubenswrapper[4877]: I0128 17:05:27.347591 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5" path="/var/lib/kubelet/pods/0dd1ad43-4b6a-453c-8eeb-c90b19e5a1e5/volumes" Jan 28 17:05:27 crc kubenswrapper[4877]: I0128 17:05:27.348299 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="894b6108-3063-40a2-809a-b8f8393b3ecc" path="/var/lib/kubelet/pods/894b6108-3063-40a2-809a-b8f8393b3ecc/volumes" Jan 28 17:05:27 crc kubenswrapper[4877]: I0128 17:05:27.372688 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-t55qb" Jan 28 17:05:28 crc kubenswrapper[4877]: I0128 17:05:28.663699 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:05:28 crc kubenswrapper[4877]: I0128 17:05:28.664572 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerName="ceilometer-notification-agent" containerID="cri-o://129a129c1f9621481b00d05607a3aeef040820ce458f6b7114d8f32934d2cc36" gracePeriod=30 Jan 28 17:05:28 crc kubenswrapper[4877]: I0128 17:05:28.664579 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerName="proxy-httpd" containerID="cri-o://6b2118139046633c8c8b0cbf09e0d8caf1e682c61d33f4b164c84fbefd50c2b4" gracePeriod=30 Jan 28 17:05:28 crc kubenswrapper[4877]: I0128 17:05:28.664515 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerName="ceilometer-central-agent" containerID="cri-o://efd8471085341bd2888975f79dea337e1f50d9041cd520726407bd81e219fa65" gracePeriod=30 Jan 28 17:05:28 crc kubenswrapper[4877]: I0128 17:05:28.664599 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerName="sg-core" containerID="cri-o://35eb8e578ef48ad568ce4c632e2516aa822ae878bd180030879b3e4923f11b54" gracePeriod=30 Jan 28 17:05:29 crc kubenswrapper[4877]: I0128 17:05:29.196264 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-t55qb"] Jan 28 17:05:29 crc kubenswrapper[4877]: I0128 17:05:29.524040 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-t55qb" event={"ID":"4b5478d7-63fb-41e2-89c8-1d27290a9844","Type":"ContainerStarted","Data":"b59bf0d834ce591487f9fc0a44b36ed72b859d78f7bac6f96769d0bb1836c1e4"} Jan 28 17:05:30 crc kubenswrapper[4877]: E0128 17:05:30.102251 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbce26068_762c_4da5_aa19_cf4c27d56c19.slice/crio-conmon-efd8471085341bd2888975f79dea337e1f50d9041cd520726407bd81e219fa65.scope\": RecentStats: unable to find data in memory cache]" Jan 28 17:05:30 crc kubenswrapper[4877]: I0128 17:05:30.377119 4877 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openstack/rabbitmq-server-2"] Jan 28 17:05:30 crc kubenswrapper[4877]: I0128 17:05:30.586526 4877 generic.go:334] "Generic (PLEG): container finished" podID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerID="6b2118139046633c8c8b0cbf09e0d8caf1e682c61d33f4b164c84fbefd50c2b4" exitCode=0 Jan 28 17:05:30 crc kubenswrapper[4877]: I0128 17:05:30.586559 4877 generic.go:334] "Generic (PLEG): container finished" podID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerID="35eb8e578ef48ad568ce4c632e2516aa822ae878bd180030879b3e4923f11b54" exitCode=2 Jan 28 17:05:30 crc kubenswrapper[4877]: I0128 17:05:30.586568 4877 generic.go:334] "Generic (PLEG): container finished" podID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerID="efd8471085341bd2888975f79dea337e1f50d9041cd520726407bd81e219fa65" exitCode=0 Jan 28 17:05:30 crc kubenswrapper[4877]: I0128 17:05:30.586587 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bce26068-762c-4da5-aa19-cf4c27d56c19","Type":"ContainerDied","Data":"6b2118139046633c8c8b0cbf09e0d8caf1e682c61d33f4b164c84fbefd50c2b4"} Jan 28 17:05:30 crc kubenswrapper[4877]: I0128 17:05:30.586613 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bce26068-762c-4da5-aa19-cf4c27d56c19","Type":"ContainerDied","Data":"35eb8e578ef48ad568ce4c632e2516aa822ae878bd180030879b3e4923f11b54"} Jan 28 17:05:30 crc kubenswrapper[4877]: I0128 17:05:30.586623 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bce26068-762c-4da5-aa19-cf4c27d56c19","Type":"ContainerDied","Data":"efd8471085341bd2888975f79dea337e1f50d9041cd520726407bd81e219fa65"} Jan 28 17:05:31 crc kubenswrapper[4877]: I0128 17:05:31.333944 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:05:31 crc kubenswrapper[4877]: E0128 17:05:31.334562 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:05:31 crc kubenswrapper[4877]: I0128 17:05:31.580891 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 17:05:31 crc kubenswrapper[4877]: I0128 17:05:31.604692 4877 generic.go:334] "Generic (PLEG): container finished" podID="dd412c38-2df2-4881-8293-4866583158c8" containerID="024077c7f5f6c23ba15d071b3cfa4c8efba5c044dc78a056721d598ebe6335ee" exitCode=0 Jan 28 17:05:31 crc kubenswrapper[4877]: I0128 17:05:31.604946 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxcqh" event={"ID":"dd412c38-2df2-4881-8293-4866583158c8","Type":"ContainerDied","Data":"024077c7f5f6c23ba15d071b3cfa4c8efba5c044dc78a056721d598ebe6335ee"} Jan 28 17:05:36 crc kubenswrapper[4877]: I0128 17:05:36.681087 4877 generic.go:334] "Generic (PLEG): container finished" podID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerID="129a129c1f9621481b00d05607a3aeef040820ce458f6b7114d8f32934d2cc36" exitCode=0 Jan 28 17:05:36 crc kubenswrapper[4877]: I0128 17:05:36.681174 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"bce26068-762c-4da5-aa19-cf4c27d56c19","Type":"ContainerDied","Data":"129a129c1f9621481b00d05607a3aeef040820ce458f6b7114d8f32934d2cc36"} Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.081006 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.234685 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bce26068-762c-4da5-aa19-cf4c27d56c19-run-httpd\") pod \"bce26068-762c-4da5-aa19-cf4c27d56c19\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.234838 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-combined-ca-bundle\") pod \"bce26068-762c-4da5-aa19-cf4c27d56c19\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.234876 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-ceilometer-tls-certs\") pod \"bce26068-762c-4da5-aa19-cf4c27d56c19\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.234953 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rwfg\" (UniqueName: \"kubernetes.io/projected/bce26068-762c-4da5-aa19-cf4c27d56c19-kube-api-access-2rwfg\") pod \"bce26068-762c-4da5-aa19-cf4c27d56c19\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.234998 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-sg-core-conf-yaml\") pod \"bce26068-762c-4da5-aa19-cf4c27d56c19\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.235072 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-config-data\") pod \"bce26068-762c-4da5-aa19-cf4c27d56c19\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.235100 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-scripts\") pod \"bce26068-762c-4da5-aa19-cf4c27d56c19\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.235221 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bce26068-762c-4da5-aa19-cf4c27d56c19-log-httpd\") pod \"bce26068-762c-4da5-aa19-cf4c27d56c19\" (UID: \"bce26068-762c-4da5-aa19-cf4c27d56c19\") " Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.235904 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bce26068-762c-4da5-aa19-cf4c27d56c19-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "bce26068-762c-4da5-aa19-cf4c27d56c19" (UID: "bce26068-762c-4da5-aa19-cf4c27d56c19"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.236020 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bce26068-762c-4da5-aa19-cf4c27d56c19-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "bce26068-762c-4da5-aa19-cf4c27d56c19" (UID: "bce26068-762c-4da5-aa19-cf4c27d56c19"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.236343 4877 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bce26068-762c-4da5-aa19-cf4c27d56c19-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.236362 4877 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bce26068-762c-4da5-aa19-cf4c27d56c19-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.321176 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-scripts" (OuterVolumeSpecName: "scripts") pod "bce26068-762c-4da5-aa19-cf4c27d56c19" (UID: "bce26068-762c-4da5-aa19-cf4c27d56c19"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.338567 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.348699 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bce26068-762c-4da5-aa19-cf4c27d56c19-kube-api-access-2rwfg" (OuterVolumeSpecName: "kube-api-access-2rwfg") pod "bce26068-762c-4da5-aa19-cf4c27d56c19" (UID: "bce26068-762c-4da5-aa19-cf4c27d56c19"). InnerVolumeSpecName "kube-api-access-2rwfg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.358630 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "bce26068-762c-4da5-aa19-cf4c27d56c19" (UID: "bce26068-762c-4da5-aa19-cf4c27d56c19"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.419050 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "bce26068-762c-4da5-aa19-cf4c27d56c19" (UID: "bce26068-762c-4da5-aa19-cf4c27d56c19"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.441309 4877 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.441352 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rwfg\" (UniqueName: \"kubernetes.io/projected/bce26068-762c-4da5-aa19-cf4c27d56c19-kube-api-access-2rwfg\") on node \"crc\" DevicePath \"\"" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.441372 4877 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.452737 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bce26068-762c-4da5-aa19-cf4c27d56c19" (UID: "bce26068-762c-4da5-aa19-cf4c27d56c19"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.498598 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-config-data" (OuterVolumeSpecName: "config-data") pod "bce26068-762c-4da5-aa19-cf4c27d56c19" (UID: "bce26068-762c-4da5-aa19-cf4c27d56c19"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.543850 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.543909 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce26068-762c-4da5-aa19-cf4c27d56c19-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.713252 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bce26068-762c-4da5-aa19-cf4c27d56c19","Type":"ContainerDied","Data":"03d08429375204d5cb2689cfc0cd62adea00a48b57e0a07c432da0f251d11fe9"} Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.713310 4877 scope.go:117] "RemoveContainer" containerID="6b2118139046633c8c8b0cbf09e0d8caf1e682c61d33f4b164c84fbefd50c2b4" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.713323 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.924891 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.944733 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.960996 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:05:38 crc kubenswrapper[4877]: E0128 17:05:38.961748 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerName="sg-core" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.961769 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerName="sg-core" Jan 28 17:05:38 crc kubenswrapper[4877]: E0128 17:05:38.961801 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerName="ceilometer-central-agent" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.961811 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerName="ceilometer-central-agent" Jan 28 17:05:38 crc kubenswrapper[4877]: E0128 17:05:38.961823 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerName="proxy-httpd" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.961830 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerName="proxy-httpd" Jan 28 17:05:38 crc kubenswrapper[4877]: E0128 17:05:38.961841 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerName="ceilometer-notification-agent" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.961847 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerName="ceilometer-notification-agent" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.962118 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerName="proxy-httpd" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.962136 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerName="ceilometer-central-agent" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.962154 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerName="sg-core" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.962167 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" containerName="ceilometer-notification-agent" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.965012 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.971004 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.971278 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.971417 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 17:05:38 crc kubenswrapper[4877]: I0128 17:05:38.989583 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.060426 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8j56\" (UniqueName: \"kubernetes.io/projected/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-kube-api-access-p8j56\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.060517 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.060643 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.060693 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.061014 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-scripts\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.061171 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-log-httpd\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.061447 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-config-data\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.061671 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-run-httpd\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.165842 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-config-data\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.166019 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-run-httpd\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.166252 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8j56\" (UniqueName: \"kubernetes.io/projected/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-kube-api-access-p8j56\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.166345 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.166444 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.166500 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.166595 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-scripts\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.166634 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-log-httpd\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.166733 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-run-httpd\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.167277 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-log-httpd\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.175409 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-scripts\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.175537 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.181324 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-config-data\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.182423 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.190970 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.194289 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8j56\" (UniqueName: \"kubernetes.io/projected/5b2e9ef8-3e88-4cec-bda6-2c143670f73a-kube-api-access-p8j56\") pod \"ceilometer-0\" (UID: \"5b2e9ef8-3e88-4cec-bda6-2c143670f73a\") " pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.344424 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 17:05:39 crc kubenswrapper[4877]: I0128 17:05:39.353131 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bce26068-762c-4da5-aa19-cf4c27d56c19" path="/var/lib/kubelet/pods/bce26068-762c-4da5-aa19-cf4c27d56c19/volumes" Jan 28 17:05:40 crc kubenswrapper[4877]: I0128 17:05:40.777314 4877 scope.go:117] "RemoveContainer" containerID="35eb8e578ef48ad568ce4c632e2516aa822ae878bd180030879b3e4923f11b54" Jan 28 17:05:40 crc kubenswrapper[4877]: I0128 17:05:40.833353 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="d96b5016-3ed4-4f98-8708-f69092894981" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused" Jan 28 17:05:41 crc kubenswrapper[4877]: I0128 17:05:41.076118 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="5d261b3a-c6f9-48bd-92de-b76d3821e778" containerName="rabbitmq" containerID="cri-o://793925a2d2bcdc32855957ec2a7488bda261e25aa749028fb609ae0ca451af05" gracePeriod=604791 Jan 28 17:05:41 crc kubenswrapper[4877]: I0128 17:05:41.094564 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-2" podUID="d96b5016-3ed4-4f98-8708-f69092894981" containerName="rabbitmq" containerID="cri-o://f5725047d30bdfc1c6d9311e0bc73bc548184731c937882127b2785a7410e9fc" gracePeriod=604790 Jan 28 17:05:41 crc kubenswrapper[4877]: I0128 17:05:41.273765 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="5d261b3a-c6f9-48bd-92de-b76d3821e778" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.131:5671: connect: connection refused" Jan 28 17:05:46 crc kubenswrapper[4877]: I0128 17:05:46.330778 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:05:50 crc kubenswrapper[4877]: I0128 17:05:50.833560 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="d96b5016-3ed4-4f98-8708-f69092894981" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused" Jan 28 17:05:51 crc kubenswrapper[4877]: I0128 17:05:51.307176 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="5d261b3a-c6f9-48bd-92de-b76d3821e778" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.131:5671: connect: connection refused" Jan 28 17:05:53 crc kubenswrapper[4877]: I0128 17:05:53.485353 4877 scope.go:117] "RemoveContainer" containerID="129a129c1f9621481b00d05607a3aeef040820ce458f6b7114d8f32934d2cc36" Jan 28 17:05:54 crc kubenswrapper[4877]: I0128 17:05:54.912513 4877 generic.go:334] "Generic (PLEG): container finished" podID="d96b5016-3ed4-4f98-8708-f69092894981" containerID="f5725047d30bdfc1c6d9311e0bc73bc548184731c937882127b2785a7410e9fc" exitCode=0 Jan 28 17:05:54 crc kubenswrapper[4877]: I0128 17:05:54.912607 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"d96b5016-3ed4-4f98-8708-f69092894981","Type":"ContainerDied","Data":"f5725047d30bdfc1c6d9311e0bc73bc548184731c937882127b2785a7410e9fc"} Jan 28 17:05:54 crc kubenswrapper[4877]: I0128 17:05:54.916948 4877 generic.go:334] "Generic (PLEG): container finished" podID="5d261b3a-c6f9-48bd-92de-b76d3821e778" 
containerID="793925a2d2bcdc32855957ec2a7488bda261e25aa749028fb609ae0ca451af05" exitCode=0 Jan 28 17:05:54 crc kubenswrapper[4877]: I0128 17:05:54.916997 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5d261b3a-c6f9-48bd-92de-b76d3821e778","Type":"ContainerDied","Data":"793925a2d2bcdc32855957ec2a7488bda261e25aa749028fb609ae0ca451af05"} Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.517400 4877 scope.go:117] "RemoveContainer" containerID="efd8471085341bd2888975f79dea337e1f50d9041cd520726407bd81e219fa65" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.727495 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.734865 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.833989 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="d96b5016-3ed4-4f98-8708-f69092894981" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: i/o timeout" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.834092 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-2" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.867043 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-config-data\") pod \"5d261b3a-c6f9-48bd-92de-b76d3821e778\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.875639 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\") pod \"5d261b3a-c6f9-48bd-92de-b76d3821e778\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.875907 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-tls\") pod \"5d261b3a-c6f9-48bd-92de-b76d3821e778\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.876090 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-plugins-conf\") pod \"d96b5016-3ed4-4f98-8708-f69092894981\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.876216 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-erlang-cookie\") pod \"5d261b3a-c6f9-48bd-92de-b76d3821e778\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.876252 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-server-conf\") pod \"d96b5016-3ed4-4f98-8708-f69092894981\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " Jan 28 17:06:05 crc 
kubenswrapper[4877]: I0128 17:06:05.877802 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "5d261b3a-c6f9-48bd-92de-b76d3821e778" (UID: "5d261b3a-c6f9-48bd-92de-b76d3821e778"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.884828 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "5d261b3a-c6f9-48bd-92de-b76d3821e778" (UID: "5d261b3a-c6f9-48bd-92de-b76d3821e778"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.902509 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\") pod \"d96b5016-3ed4-4f98-8708-f69092894981\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.903108 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5d261b3a-c6f9-48bd-92de-b76d3821e778-pod-info\") pod \"5d261b3a-c6f9-48bd-92de-b76d3821e778\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.903176 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkp5p\" (UniqueName: \"kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-kube-api-access-pkp5p\") pod \"5d261b3a-c6f9-48bd-92de-b76d3821e778\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.903225 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d96b5016-3ed4-4f98-8708-f69092894981-erlang-cookie-secret\") pod \"d96b5016-3ed4-4f98-8708-f69092894981\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.903277 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-config-data\") pod \"d96b5016-3ed4-4f98-8708-f69092894981\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.903301 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d96b5016-3ed4-4f98-8708-f69092894981-pod-info\") pod \"d96b5016-3ed4-4f98-8708-f69092894981\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.903325 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-plugins\") pod \"5d261b3a-c6f9-48bd-92de-b76d3821e778\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.903467 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-confd\") pod \"d96b5016-3ed4-4f98-8708-f69092894981\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.903540 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5d261b3a-c6f9-48bd-92de-b76d3821e778-erlang-cookie-secret\") pod \"5d261b3a-c6f9-48bd-92de-b76d3821e778\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.903581 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-confd\") pod \"5d261b3a-c6f9-48bd-92de-b76d3821e778\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.903623 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-server-conf\") pod \"5d261b3a-c6f9-48bd-92de-b76d3821e778\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.903651 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-erlang-cookie\") pod \"d96b5016-3ed4-4f98-8708-f69092894981\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.903693 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djqhq\" (UniqueName: \"kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-kube-api-access-djqhq\") pod \"d96b5016-3ed4-4f98-8708-f69092894981\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.903716 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-tls\") pod \"d96b5016-3ed4-4f98-8708-f69092894981\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.903742 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-plugins\") pod \"d96b5016-3ed4-4f98-8708-f69092894981\" (UID: \"d96b5016-3ed4-4f98-8708-f69092894981\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.903806 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-plugins-conf\") pod \"5d261b3a-c6f9-48bd-92de-b76d3821e778\" (UID: \"5d261b3a-c6f9-48bd-92de-b76d3821e778\") " Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.905107 4877 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.905134 4877 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-erlang-cookie\") on 
node \"crc\" DevicePath \"\"" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.912785 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-kube-api-access-pkp5p" (OuterVolumeSpecName: "kube-api-access-pkp5p") pod "5d261b3a-c6f9-48bd-92de-b76d3821e778" (UID: "5d261b3a-c6f9-48bd-92de-b76d3821e778"). InnerVolumeSpecName "kube-api-access-pkp5p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.936960 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "d96b5016-3ed4-4f98-8708-f69092894981" (UID: "d96b5016-3ed4-4f98-8708-f69092894981"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.938535 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "5d261b3a-c6f9-48bd-92de-b76d3821e778" (UID: "5d261b3a-c6f9-48bd-92de-b76d3821e778"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.938785 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "d96b5016-3ed4-4f98-8708-f69092894981" (UID: "d96b5016-3ed4-4f98-8708-f69092894981"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.940851 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/5d261b3a-c6f9-48bd-92de-b76d3821e778-pod-info" (OuterVolumeSpecName: "pod-info") pod "5d261b3a-c6f9-48bd-92de-b76d3821e778" (UID: "5d261b3a-c6f9-48bd-92de-b76d3821e778"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.942709 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "d96b5016-3ed4-4f98-8708-f69092894981" (UID: "d96b5016-3ed4-4f98-8708-f69092894981"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.944307 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d261b3a-c6f9-48bd-92de-b76d3821e778-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "5d261b3a-c6f9-48bd-92de-b76d3821e778" (UID: "5d261b3a-c6f9-48bd-92de-b76d3821e778"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.949250 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-config-data" (OuterVolumeSpecName: "config-data") pod "5d261b3a-c6f9-48bd-92de-b76d3821e778" (UID: "5d261b3a-c6f9-48bd-92de-b76d3821e778"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.949045 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "5d261b3a-c6f9-48bd-92de-b76d3821e778" (UID: "5d261b3a-c6f9-48bd-92de-b76d3821e778"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.955683 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d96b5016-3ed4-4f98-8708-f69092894981-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "d96b5016-3ed4-4f98-8708-f69092894981" (UID: "d96b5016-3ed4-4f98-8708-f69092894981"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.962034 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/d96b5016-3ed4-4f98-8708-f69092894981-pod-info" (OuterVolumeSpecName: "pod-info") pod "d96b5016-3ed4-4f98-8708-f69092894981" (UID: "d96b5016-3ed4-4f98-8708-f69092894981"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.978860 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-kube-api-access-djqhq" (OuterVolumeSpecName: "kube-api-access-djqhq") pod "d96b5016-3ed4-4f98-8708-f69092894981" (UID: "d96b5016-3ed4-4f98-8708-f69092894981"). InnerVolumeSpecName "kube-api-access-djqhq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.980063 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7" (OuterVolumeSpecName: "persistence") pod "d96b5016-3ed4-4f98-8708-f69092894981" (UID: "d96b5016-3ed4-4f98-8708-f69092894981"). InnerVolumeSpecName "pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.985206 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-server-conf" (OuterVolumeSpecName: "server-conf") pod "d96b5016-3ed4-4f98-8708-f69092894981" (UID: "d96b5016-3ed4-4f98-8708-f69092894981"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.991791 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "d96b5016-3ed4-4f98-8708-f69092894981" (UID: "d96b5016-3ed4-4f98-8708-f69092894981"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:06:05 crc kubenswrapper[4877]: I0128 17:06:05.997109 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3" (OuterVolumeSpecName: "persistence") pod "5d261b3a-c6f9-48bd-92de-b76d3821e778" (UID: "5d261b3a-c6f9-48bd-92de-b76d3821e778"). 
InnerVolumeSpecName "pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.008271 4877 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5d261b3a-c6f9-48bd-92de-b76d3821e778-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.008309 4877 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.008319 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djqhq\" (UniqueName: \"kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-kube-api-access-djqhq\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.008329 4877 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.008337 4877 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.008344 4877 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.008352 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.008390 4877 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\") on node \"crc\" " Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.008401 4877 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.008410 4877 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-server-conf\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.008425 4877 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\") on node \"crc\" " Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.008436 4877 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5d261b3a-c6f9-48bd-92de-b76d3821e778-pod-info\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.008445 4877 reconciler_common.go:293] "Volume detached 
for volume \"kube-api-access-pkp5p\" (UniqueName: \"kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-kube-api-access-pkp5p\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.008454 4877 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d96b5016-3ed4-4f98-8708-f69092894981-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.008463 4877 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d96b5016-3ed4-4f98-8708-f69092894981-pod-info\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.008471 4877 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.038502 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-config-data" (OuterVolumeSpecName: "config-data") pod "d96b5016-3ed4-4f98-8708-f69092894981" (UID: "d96b5016-3ed4-4f98-8708-f69092894981"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.066724 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-server-conf" (OuterVolumeSpecName: "server-conf") pod "5d261b3a-c6f9-48bd-92de-b76d3821e778" (UID: "5d261b3a-c6f9-48bd-92de-b76d3821e778"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.085752 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.102498 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"d96b5016-3ed4-4f98-8708-f69092894981","Type":"ContainerDied","Data":"67365e2f49d6e215b009152f3e45420221902eb62d9b05c5f8cb5dc80ce1d0bd"} Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.102636 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.113201 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d96b5016-3ed4-4f98-8708-f69092894981-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.113254 4877 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5d261b3a-c6f9-48bd-92de-b76d3821e778-server-conf\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.114893 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5d261b3a-c6f9-48bd-92de-b76d3821e778","Type":"ContainerDied","Data":"ac6dc4628309769ce3a3015e4c19f1e05fba9f79f1d39100372e0e3baa85312c"} Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.115049 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.129249 4877 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.129386 4877 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3") on node "crc"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.131786 4877 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.132092 4877 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7") on node "crc"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.178790 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "d96b5016-3ed4-4f98-8708-f69092894981" (UID: "d96b5016-3ed4-4f98-8708-f69092894981"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.215736 4877 reconciler_common.go:293] "Volume detached for volume \"pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\") on node \"crc\" DevicePath \"\""
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.215768 4877 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d96b5016-3ed4-4f98-8708-f69092894981-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.215804 4877 reconciler_common.go:293] "Volume detached for volume \"pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\") on node \"crc\" DevicePath \"\""
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.247068 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "5d261b3a-c6f9-48bd-92de-b76d3821e778" (UID: "5d261b3a-c6f9-48bd-92de-b76d3821e778"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.273924 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="5d261b3a-c6f9-48bd-92de-b76d3821e778" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.131:5671: i/o timeout"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.274025 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.318510 4877 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5d261b3a-c6f9-48bd-92de-b76d3821e778-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.495973 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.510404 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.523752 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-2"]
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.571988 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-2"]
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.588322 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-2"]
Jan 28 17:06:06 crc kubenswrapper[4877]: E0128 17:06:06.589000 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d261b3a-c6f9-48bd-92de-b76d3821e778" containerName="rabbitmq"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.589031 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d261b3a-c6f9-48bd-92de-b76d3821e778" containerName="rabbitmq"
Jan 28 17:06:06 crc kubenswrapper[4877]: E0128 17:06:06.589052 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d96b5016-3ed4-4f98-8708-f69092894981" containerName="rabbitmq"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.589060 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d96b5016-3ed4-4f98-8708-f69092894981" containerName="rabbitmq"
Jan 28 17:06:06 crc kubenswrapper[4877]: E0128 17:06:06.589085 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d96b5016-3ed4-4f98-8708-f69092894981" containerName="setup-container"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.589092 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d96b5016-3ed4-4f98-8708-f69092894981" containerName="setup-container"
Jan 28 17:06:06 crc kubenswrapper[4877]: E0128 17:06:06.589127 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d261b3a-c6f9-48bd-92de-b76d3821e778" containerName="setup-container"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.589135 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d261b3a-c6f9-48bd-92de-b76d3821e778" containerName="setup-container"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.589468 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d261b3a-c6f9-48bd-92de-b76d3821e778" containerName="rabbitmq"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.589514 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="d96b5016-3ed4-4f98-8708-f69092894981" containerName="rabbitmq"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.592388 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.621280 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.624263 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.627253 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.627693 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.627852 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.628010 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.628152 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.636930 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-j77zw"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.638786 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.681582 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"]
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.720561 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.730975 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1dcfedb4-c672-4dc9-86bd-340f07ccc805-server-conf\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.731049 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1dcfedb4-c672-4dc9-86bd-340f07ccc805-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.731086 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1dcfedb4-c672-4dc9-86bd-340f07ccc805-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.731113 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1dcfedb4-c672-4dc9-86bd-340f07ccc805-pod-info\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.731168 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sg2l8\" (UniqueName: \"kubernetes.io/projected/1dcfedb4-c672-4dc9-86bd-340f07ccc805-kube-api-access-sg2l8\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.731209 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1dcfedb4-c672-4dc9-86bd-340f07ccc805-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.731241 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1dcfedb4-c672-4dc9-86bd-340f07ccc805-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.731274 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.731308 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mcgj\" (UniqueName: \"kubernetes.io/projected/45c31f3a-8427-4523-b741-1b317afc8ee6-kube-api-access-6mcgj\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.731676 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1dcfedb4-c672-4dc9-86bd-340f07ccc805-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.731797 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/45c31f3a-8427-4523-b741-1b317afc8ee6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.731874 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/45c31f3a-8427-4523-b741-1b317afc8ee6-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.731977 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/45c31f3a-8427-4523-b741-1b317afc8ee6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.732176 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/45c31f3a-8427-4523-b741-1b317afc8ee6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.732234 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.732497 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/45c31f3a-8427-4523-b741-1b317afc8ee6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.732632 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1dcfedb4-c672-4dc9-86bd-340f07ccc805-config-data\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.732678 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/45c31f3a-8427-4523-b741-1b317afc8ee6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.732769 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/45c31f3a-8427-4523-b741-1b317afc8ee6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.732871 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1dcfedb4-c672-4dc9-86bd-340f07ccc805-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.732992 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/45c31f3a-8427-4523-b741-1b317afc8ee6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.733023 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/45c31f3a-8427-4523-b741-1b317afc8ee6-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.835785 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/45c31f3a-8427-4523-b741-1b317afc8ee6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.835886 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1dcfedb4-c672-4dc9-86bd-340f07ccc805-config-data\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.835915 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/45c31f3a-8427-4523-b741-1b317afc8ee6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.835973 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/45c31f3a-8427-4523-b741-1b317afc8ee6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836038 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1dcfedb4-c672-4dc9-86bd-340f07ccc805-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836105 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/45c31f3a-8427-4523-b741-1b317afc8ee6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836127 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/45c31f3a-8427-4523-b741-1b317afc8ee6-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836163 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1dcfedb4-c672-4dc9-86bd-340f07ccc805-server-conf\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836188 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1dcfedb4-c672-4dc9-86bd-340f07ccc805-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836316 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1dcfedb4-c672-4dc9-86bd-340f07ccc805-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836345 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1dcfedb4-c672-4dc9-86bd-340f07ccc805-pod-info\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836341 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/45c31f3a-8427-4523-b741-1b317afc8ee6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836420 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sg2l8\" (UniqueName: \"kubernetes.io/projected/1dcfedb4-c672-4dc9-86bd-340f07ccc805-kube-api-access-sg2l8\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836457 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1dcfedb4-c672-4dc9-86bd-340f07ccc805-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836511 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1dcfedb4-c672-4dc9-86bd-340f07ccc805-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836546 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836578 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mcgj\" (UniqueName: \"kubernetes.io/projected/45c31f3a-8427-4523-b741-1b317afc8ee6-kube-api-access-6mcgj\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836611 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1dcfedb4-c672-4dc9-86bd-340f07ccc805-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836642 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/45c31f3a-8427-4523-b741-1b317afc8ee6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836678 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/45c31f3a-8427-4523-b741-1b317afc8ee6-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836729 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/45c31f3a-8427-4523-b741-1b317afc8ee6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836771 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836799 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/45c31f3a-8427-4523-b741-1b317afc8ee6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.836859 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1dcfedb4-c672-4dc9-86bd-340f07ccc805-config-data\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.837169 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/45c31f3a-8427-4523-b741-1b317afc8ee6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.837514 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1dcfedb4-c672-4dc9-86bd-340f07ccc805-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.837718 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1dcfedb4-c672-4dc9-86bd-340f07ccc805-rabbitmq-plugins\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.837901 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/45c31f3a-8427-4523-b741-1b317afc8ee6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.838464 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1dcfedb4-c672-4dc9-86bd-340f07ccc805-plugins-conf\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.838632 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1dcfedb4-c672-4dc9-86bd-340f07ccc805-server-conf\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.838938 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/45c31f3a-8427-4523-b741-1b317afc8ee6-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.839165 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.839191 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/dd13eadb591ae01e691b5638b2fec0ba44b9edb8b2da41d7fd4b1906fea1a0e1/globalmount\"" pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.841315 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.841370 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/15ce35ef8412648b43e30449966151966004a81cada05d357841689a3aa2f9ec/globalmount\"" pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.842906 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/45c31f3a-8427-4523-b741-1b317afc8ee6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.843154 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/45c31f3a-8427-4523-b741-1b317afc8ee6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.844294 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/45c31f3a-8427-4523-b741-1b317afc8ee6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.846641 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1dcfedb4-c672-4dc9-86bd-340f07ccc805-pod-info\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.846696 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1dcfedb4-c672-4dc9-86bd-340f07ccc805-rabbitmq-tls\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.848052 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/45c31f3a-8427-4523-b741-1b317afc8ee6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.848524 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1dcfedb4-c672-4dc9-86bd-340f07ccc805-rabbitmq-confd\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.849133 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1dcfedb4-c672-4dc9-86bd-340f07ccc805-erlang-cookie-secret\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.850111 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/45c31f3a-8427-4523-b741-1b317afc8ee6-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.859571 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sg2l8\" (UniqueName: \"kubernetes.io/projected/1dcfedb4-c672-4dc9-86bd-340f07ccc805-kube-api-access-sg2l8\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.860263 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mcgj\" (UniqueName: \"kubernetes.io/projected/45c31f3a-8427-4523-b741-1b317afc8ee6-kube-api-access-6mcgj\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.906341 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0a50f2f4-cca5-4d3e-8e3c-be2b936bc8b3\") pod \"rabbitmq-cell1-server-0\" (UID: \"45c31f3a-8427-4523-b741-1b317afc8ee6\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.908846 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4578803-2e8d-44ba-a5df-422d8d7eb2b7\") pod \"rabbitmq-server-2\" (UID: \"1dcfedb4-c672-4dc9-86bd-340f07ccc805\") " pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.931036 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-2"
Jan 28 17:06:06 crc kubenswrapper[4877]: I0128 17:06:06.964310 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 28 17:06:07 crc kubenswrapper[4877]: I0128 17:06:07.368140 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d261b3a-c6f9-48bd-92de-b76d3821e778" path="/var/lib/kubelet/pods/5d261b3a-c6f9-48bd-92de-b76d3821e778/volumes"
Jan 28 17:06:07 crc kubenswrapper[4877]: I0128 17:06:07.370370 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d96b5016-3ed4-4f98-8708-f69092894981" path="/var/lib/kubelet/pods/d96b5016-3ed4-4f98-8708-f69092894981/volumes"
Jan 28 17:06:07 crc kubenswrapper[4877]: I0128 17:06:07.645551 4877 scope.go:117] "RemoveContainer" containerID="f5725047d30bdfc1c6d9311e0bc73bc548184731c937882127b2785a7410e9fc"
Jan 28 17:06:07 crc kubenswrapper[4877]: I0128 17:06:07.836409 4877 scope.go:117] "RemoveContainer" containerID="52942a240b967e15d227027e6b2bf1fa3d2ce227a325fd22c64b791144cfc420"
Jan 28 17:06:07 crc kubenswrapper[4877]: I0128 17:06:07.940896 4877 scope.go:117] "RemoveContainer" containerID="793925a2d2bcdc32855957ec2a7488bda261e25aa749028fb609ae0ca451af05"
Jan 28 17:06:07 crc kubenswrapper[4877]: E0128 17:06:07.954796 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Jan 28 17:06:07 crc kubenswrapper[4877]: E0128 17:06:07.954867 4877 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Jan 28 17:06:07 crc kubenswrapper[4877]: E0128 17:06:07.955059 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vx5cg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-t55qb_openstack(4b5478d7-63fb-41e2-89c8-1d27290a9844): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 17:06:07 crc kubenswrapper[4877]: E0128 17:06:07.956243 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-t55qb" podUID="4b5478d7-63fb-41e2-89c8-1d27290a9844"
Jan 28 17:06:08 crc kubenswrapper[4877]: I0128 17:06:08.053900 4877 scope.go:117] "RemoveContainer" containerID="8e0fb86256a77a22a05970915add645a2a85e79bc329b01962f4b68e24315021"
Jan 28 17:06:08 crc kubenswrapper[4877]: I0128 17:06:08.139096 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b2e9ef8-3e88-4cec-bda6-2c143670f73a","Type":"ContainerStarted","Data":"5d394735938a52e4a2f78b93d5968c52c3cbee65a90742ea5b0e14693fae71d1"}
Jan 28 17:06:08 crc kubenswrapper[4877]: I0128 17:06:08.143816 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxcqh" event={"ID":"dd412c38-2df2-4881-8293-4866583158c8","Type":"ContainerStarted","Data":"17b32d0f267b94e372948fa9ad6972a0bf04e267d8043f3f5963a2031b080b85"}
Jan 28 17:06:08 crc kubenswrapper[4877]: E0128 17:06:08.185444 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-t55qb" podUID="4b5478d7-63fb-41e2-89c8-1d27290a9844"
Jan 28 17:06:08 crc kubenswrapper[4877]: W0128 17:06:08.435205 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod45c31f3a_8427_4523_b741_1b317afc8ee6.slice/crio-471121eeb3f9ae3e5736613430582454fed45b4f734670a725ce55320d62d672 WatchSource:0}: Error finding container 471121eeb3f9ae3e5736613430582454fed45b4f734670a725ce55320d62d672: Status 404 returned error can't find the container with id 471121eeb3f9ae3e5736613430582454fed45b4f734670a725ce55320d62d672
Jan 28 17:06:08 crc kubenswrapper[4877]: I0128 17:06:08.440300 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 28 17:06:08 crc kubenswrapper[4877]: W0128 17:06:08.461956 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1dcfedb4_c672_4dc9_86bd_340f07ccc805.slice/crio-9c93fccaa5a4c7b3a22d99944c9eae5751f5b3060b2792b4e286334a6eab54e3 WatchSource:0}: Error finding container 9c93fccaa5a4c7b3a22d99944c9eae5751f5b3060b2792b4e286334a6eab54e3: Status 404 returned error can't find the container with id 9c93fccaa5a4c7b3a22d99944c9eae5751f5b3060b2792b4e286334a6eab54e3
Jan 28 17:06:08 crc kubenswrapper[4877]: I0128 17:06:08.465597 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-2"]
Jan 28 17:06:09 crc kubenswrapper[4877]: I0128 17:06:09.181843 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"c7f0577d72207bd89c21d8eb9092633da62ac51cdf714b1a1e5d82fcc0a22555"}
Jan 28 17:06:09 crc kubenswrapper[4877]: I0128 17:06:09.186745 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"45c31f3a-8427-4523-b741-1b317afc8ee6","Type":"ContainerStarted","Data":"471121eeb3f9ae3e5736613430582454fed45b4f734670a725ce55320d62d672"}
Jan 28 17:06:09 crc kubenswrapper[4877]: I0128 17:06:09.188459 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"1dcfedb4-c672-4dc9-86bd-340f07ccc805","Type":"ContainerStarted","Data":"9c93fccaa5a4c7b3a22d99944c9eae5751f5b3060b2792b4e286334a6eab54e3"}
Jan 28 17:06:09 crc kubenswrapper[4877]: I0128 17:06:09.226171 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wxcqh" podStartSLOduration=21.233096406 podStartE2EDuration="1m1.22615104s" podCreationTimestamp="2026-01-28 17:05:08 +0000 UTC" firstStartedPulling="2026-01-28 17:05:10.261780612 +0000 UTC m=+1813.820107500" lastFinishedPulling="2026-01-28 17:05:50.254835246 +0000 UTC m=+1853.813162134" observedRunningTime="2026-01-28 17:06:09.222581373 +0000 UTC m=+1872.780908261" watchObservedRunningTime="2026-01-28 17:06:09.22615104 +0000 UTC m=+1872.784477928"
Jan 28 17:06:10 crc kubenswrapper[4877]: I0128 17:06:10.204259 4877 generic.go:334] "Generic (PLEG): container finished" podID="e5954741-e854-495a-9122-509fcfa1ec6c" containerID="c1fabd10a246823b9ac3c710827b5ee5a99d881960131e15933f7dee66569882" exitCode=0
Jan 28 17:06:10 crc kubenswrapper[4877]: I0128 17:06:10.204344 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fsrk2" event={"ID":"e5954741-e854-495a-9122-509fcfa1ec6c","Type":"ContainerDied","Data":"c1fabd10a246823b9ac3c710827b5ee5a99d881960131e15933f7dee66569882"}
Jan 28 17:06:14 crc kubenswrapper[4877]: I0128 17:06:14.256129 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"1dcfedb4-c672-4dc9-86bd-340f07ccc805","Type":"ContainerStarted","Data":"8795659c2a5f8946b7c3f792710f2a4b3f9b26e5f945400f66c2acc15222cf58"}
Jan 28 17:06:14 crc kubenswrapper[4877]: I0128 17:06:14.261571 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"45c31f3a-8427-4523-b741-1b317afc8ee6","Type":"ContainerStarted","Data":"9e696eb29f3e812aa4889e66f6339912ad7f7667ea83ec13b180e999e77aab20"}
Jan 28 17:06:18 crc kubenswrapper[4877]: I0128 17:06:18.312881 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b2e9ef8-3e88-4cec-bda6-2c143670f73a","Type":"ContainerStarted","Data":"8e5ebb728811ad1acbf50d9182edf0cf9022a1a331f71ea66affdcd5081d5a4a"}
Jan 28 17:06:18 crc kubenswrapper[4877]: I0128 17:06:18.316680 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fsrk2" event={"ID":"e5954741-e854-495a-9122-509fcfa1ec6c","Type":"ContainerStarted","Data":"cc45064933bfd058266db3abcefe4acc0adb5585f3df85d7a0231bf3f414acb4"}
Jan 28 17:06:18 crc kubenswrapper[4877]: I0128 17:06:18.348166 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fsrk2" podStartSLOduration=4.289332692 podStartE2EDuration="1m13.348145867s" podCreationTimestamp="2026-01-28 17:05:05 +0000 UTC" firstStartedPulling="2026-01-28 17:05:08.22195321 +0000 UTC m=+1811.780280098" lastFinishedPulling="2026-01-28 17:06:17.280766385 +0000 UTC m=+1880.839093273" observedRunningTime="2026-01-28 17:06:18.335919688 +0000 UTC m=+1881.894246576" watchObservedRunningTime="2026-01-28 17:06:18.348145867 +0000 UTC m=+1881.906472765"
Jan 28 17:06:18 crc kubenswrapper[4877]: I0128 17:06:18.567571 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wxcqh"
Jan 28 17:06:18 crc kubenswrapper[4877]: I0128 17:06:18.567922 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wxcqh"
Jan 28 17:06:19 crc kubenswrapper[4877]: I0128 17:06:19.370020 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b2e9ef8-3e88-4cec-bda6-2c143670f73a","Type":"ContainerStarted","Data":"87b94023b04b1f975ca9b1ebfd5f044f68f64a750a4100122626c9d1366212ad"}
Jan 28 17:06:19 crc kubenswrapper[4877]: I0128 17:06:19.618531 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-wxcqh" podUID="dd412c38-2df2-4881-8293-4866583158c8" containerName="registry-server" probeResult="failure" output=<
Jan 28 17:06:19 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 17:06:19 crc kubenswrapper[4877]: >
Jan 28 17:06:20 crc kubenswrapper[4877]: I0128 17:06:20.367610 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b2e9ef8-3e88-4cec-bda6-2c143670f73a","Type":"ContainerStarted","Data":"e8ed317a00adad6c99c4486d08a2cf1da7f620ef0b4939410402efde934327ec"}
Jan 28 17:06:22 crc kubenswrapper[4877]: I0128 17:06:22.413311 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5b2e9ef8-3e88-4cec-bda6-2c143670f73a","Type":"ContainerStarted","Data":"bf906b6e8848cfd72e44763c505c556a28c8f3571e2906748cc1c7ec657605a0"}
Jan 28 17:06:22 crc kubenswrapper[4877]: I0128 17:06:22.413893 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 28 17:06:22 crc kubenswrapper[4877]: I0128 17:06:22.448156 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=30.278663661 podStartE2EDuration="44.4481371s" podCreationTimestamp="2026-01-28 17:05:38 +0000 UTC" firstStartedPulling="2026-01-28 17:06:07.479706518 +0000 UTC m=+1871.038033416" lastFinishedPulling="2026-01-28 17:06:21.649179967 +0000 UTC m=+1885.207506855" observedRunningTime="2026-01-28 17:06:22.436163479 +0000 UTC m=+1885.994490387" watchObservedRunningTime="2026-01-28 17:06:22.4481371 +0000 UTC m=+1886.006463988"
Jan 28 17:06:23 crc kubenswrapper[4877]: I0128 17:06:23.426783 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-t55qb" event={"ID":"4b5478d7-63fb-41e2-89c8-1d27290a9844","Type":"ContainerStarted","Data":"f0366d9ca67cf0912ad66cb7997b6b6efd6fa01d20752020bd4d5e5173d2a2f5"}
Jan 28 17:06:23 crc kubenswrapper[4877]: I0128 17:06:23.450545 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-t55qb" podStartSLOduration=4.093225807 podStartE2EDuration="57.450519593s" podCreationTimestamp="2026-01-28 17:05:26 +0000 UTC" firstStartedPulling="2026-01-28 17:05:29.199169312 +0000 UTC m=+1832.757496200" lastFinishedPulling="2026-01-28 17:06:22.556463098 +0000 UTC m=+1886.114789986" observedRunningTime="2026-01-28 17:06:23.443689589 +0000 UTC m=+1887.002016487" watchObservedRunningTime="2026-01-28 17:06:23.450519593 +0000 UTC m=+1887.008846481"
Jan 28 17:06:24 crc kubenswrapper[4877]: I0128 17:06:24.720513 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-68df85789f-zh95j"]
Jan 28 17:06:24 crc kubenswrapper[4877]: I0128 17:06:24.723257 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:24 crc kubenswrapper[4877]: I0128 17:06:24.730309 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam"
Jan 28 17:06:24 crc kubenswrapper[4877]: I0128 17:06:24.751238 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-zh95j"]
Jan 28 17:06:24 crc kubenswrapper[4877]: I0128 17:06:24.903712 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-config\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:24 crc kubenswrapper[4877]: I0128 17:06:24.903757 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:24 crc kubenswrapper[4877]: I0128 17:06:24.903798 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:24 crc kubenswrapper[4877]: I0128 17:06:24.904517 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-dns-svc\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:24 crc kubenswrapper[4877]: I0128 17:06:24.904701 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:24 crc kubenswrapper[4877]: I0128 17:06:24.904843 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:24 crc kubenswrapper[4877]: I0128 17:06:24.904909 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dk4rl\" (UniqueName: \"kubernetes.io/projected/78b79384-f7b1-4d7d-b63e-aa72d31db598-kube-api-access-dk4rl\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:25 crc kubenswrapper[4877]: I0128 17:06:25.007312 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-dns-svc\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:25 crc kubenswrapper[4877]: I0128 17:06:25.007391 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:25 crc kubenswrapper[4877]: I0128 17:06:25.007461 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:25 crc kubenswrapper[4877]: I0128 17:06:25.007527 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dk4rl\" (UniqueName: \"kubernetes.io/projected/78b79384-f7b1-4d7d-b63e-aa72d31db598-kube-api-access-dk4rl\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:25 crc kubenswrapper[4877]: I0128 17:06:25.007716 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-config\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:25 crc kubenswrapper[4877]: I0128 17:06:25.007762 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:25 crc kubenswrapper[4877]: I0128 17:06:25.007844 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:25 crc kubenswrapper[4877]: I0128 17:06:25.008218 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-dns-svc\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:25 crc kubenswrapper[4877]: I0128 17:06:25.008391 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:25 crc kubenswrapper[4877]: I0128 17:06:25.008699 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:25 crc kubenswrapper[4877]: I0128 17:06:25.008948 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-config\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:25 crc kubenswrapper[4877]: I0128 17:06:25.008975 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:25 crc kubenswrapper[4877]: I0128 17:06:25.009279 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:25 crc kubenswrapper[4877]: I0128 17:06:25.034306 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dk4rl\" (UniqueName: \"kubernetes.io/projected/78b79384-f7b1-4d7d-b63e-aa72d31db598-kube-api-access-dk4rl\") pod \"dnsmasq-dns-68df85789f-zh95j\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:25 crc kubenswrapper[4877]: I0128 17:06:25.049638 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:25 crc kubenswrapper[4877]: I0128 17:06:25.701346 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-zh95j"]
Jan 28 17:06:26 crc kubenswrapper[4877]: I0128 17:06:26.190234 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fsrk2"
Jan 28 17:06:26 crc kubenswrapper[4877]: I0128 17:06:26.190651 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fsrk2"
Jan 28 17:06:26 crc kubenswrapper[4877]: I0128 17:06:26.482933 4877 generic.go:334] "Generic (PLEG): container finished" podID="78b79384-f7b1-4d7d-b63e-aa72d31db598" containerID="cf28f934447194b3a0887a9a3f14f1092674c037710d82390626c311572f04e3" exitCode=0
Jan 28 17:06:26 crc kubenswrapper[4877]: I0128 17:06:26.483041 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-zh95j" event={"ID":"78b79384-f7b1-4d7d-b63e-aa72d31db598","Type":"ContainerDied","Data":"cf28f934447194b3a0887a9a3f14f1092674c037710d82390626c311572f04e3"}
Jan 28 17:06:26 crc kubenswrapper[4877]: I0128 17:06:26.483254 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-zh95j" event={"ID":"78b79384-f7b1-4d7d-b63e-aa72d31db598","Type":"ContainerStarted","Data":"3d824542a5ff44d8af89076ac8d1719d18bee59810e5e1118dcbeb913a46ab2a"}
Jan 28 17:06:27 crc kubenswrapper[4877]: I0128 17:06:27.249461 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fsrk2" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="registry-server" probeResult="failure" output=<
Jan 28 17:06:27 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 17:06:27 crc kubenswrapper[4877]: >
Jan 28 17:06:27 crc kubenswrapper[4877]: I0128 17:06:27.496204 4877 generic.go:334] "Generic (PLEG): container finished" podID="4b5478d7-63fb-41e2-89c8-1d27290a9844" containerID="f0366d9ca67cf0912ad66cb7997b6b6efd6fa01d20752020bd4d5e5173d2a2f5" exitCode=0
Jan 28 17:06:27 crc kubenswrapper[4877]: I0128 17:06:27.496321 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-t55qb" event={"ID":"4b5478d7-63fb-41e2-89c8-1d27290a9844","Type":"ContainerDied","Data":"f0366d9ca67cf0912ad66cb7997b6b6efd6fa01d20752020bd4d5e5173d2a2f5"}
Jan 28 17:06:27 crc kubenswrapper[4877]: I0128 17:06:27.498922 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-zh95j" event={"ID":"78b79384-f7b1-4d7d-b63e-aa72d31db598","Type":"ContainerStarted","Data":"9a7f99ee6b3a4c600dc4b19e54c2085465a52b84239f26d15eb3f91996c73546"}
Jan 28 17:06:27 crc kubenswrapper[4877]: I0128 17:06:27.499055 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-68df85789f-zh95j"
Jan 28 17:06:29 crc kubenswrapper[4877]: I0128 17:06:29.038417 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-t55qb"
Jan 28 17:06:29 crc kubenswrapper[4877]: I0128 17:06:29.067938 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-68df85789f-zh95j" podStartSLOduration=5.067913738 podStartE2EDuration="5.067913738s" podCreationTimestamp="2026-01-28 17:06:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:06:27.541547356 +0000 UTC m=+1891.099874254" watchObservedRunningTime="2026-01-28 17:06:29.067913738 +0000 UTC m=+1892.626240626"
Jan 28 17:06:29 crc kubenswrapper[4877]: I0128 17:06:29.234117 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b5478d7-63fb-41e2-89c8-1d27290a9844-config-data\") pod \"4b5478d7-63fb-41e2-89c8-1d27290a9844\" (UID: \"4b5478d7-63fb-41e2-89c8-1d27290a9844\") "
Jan 28 17:06:29 crc kubenswrapper[4877]: I0128 17:06:29.234315 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vx5cg\" (UniqueName: \"kubernetes.io/projected/4b5478d7-63fb-41e2-89c8-1d27290a9844-kube-api-access-vx5cg\") pod \"4b5478d7-63fb-41e2-89c8-1d27290a9844\" (UID: \"4b5478d7-63fb-41e2-89c8-1d27290a9844\") "
Jan 28 17:06:29 crc kubenswrapper[4877]: I0128 17:06:29.234376 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b5478d7-63fb-41e2-89c8-1d27290a9844-combined-ca-bundle\") pod \"4b5478d7-63fb-41e2-89c8-1d27290a9844\" (UID: \"4b5478d7-63fb-41e2-89c8-1d27290a9844\") "
Jan 28 17:06:29 crc kubenswrapper[4877]: I0128 17:06:29.240160 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b5478d7-63fb-41e2-89c8-1d27290a9844-kube-api-access-vx5cg" (OuterVolumeSpecName: "kube-api-access-vx5cg") pod "4b5478d7-63fb-41e2-89c8-1d27290a9844" (UID: "4b5478d7-63fb-41e2-89c8-1d27290a9844"). InnerVolumeSpecName "kube-api-access-vx5cg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:06:29 crc kubenswrapper[4877]: I0128 17:06:29.271746 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b5478d7-63fb-41e2-89c8-1d27290a9844-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4b5478d7-63fb-41e2-89c8-1d27290a9844" (UID: "4b5478d7-63fb-41e2-89c8-1d27290a9844"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:06:29 crc kubenswrapper[4877]: I0128 17:06:29.339360 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vx5cg\" (UniqueName: \"kubernetes.io/projected/4b5478d7-63fb-41e2-89c8-1d27290a9844-kube-api-access-vx5cg\") on node \"crc\" DevicePath \"\""
Jan 28 17:06:29 crc kubenswrapper[4877]: I0128 17:06:29.339919 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b5478d7-63fb-41e2-89c8-1d27290a9844-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 17:06:29 crc kubenswrapper[4877]: I0128 17:06:29.368579 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b5478d7-63fb-41e2-89c8-1d27290a9844-config-data" (OuterVolumeSpecName: "config-data") pod "4b5478d7-63fb-41e2-89c8-1d27290a9844" (UID: "4b5478d7-63fb-41e2-89c8-1d27290a9844"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:06:29 crc kubenswrapper[4877]: I0128 17:06:29.444877 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b5478d7-63fb-41e2-89c8-1d27290a9844-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 17:06:29 crc kubenswrapper[4877]: I0128 17:06:29.520747 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-t55qb" event={"ID":"4b5478d7-63fb-41e2-89c8-1d27290a9844","Type":"ContainerDied","Data":"b59bf0d834ce591487f9fc0a44b36ed72b859d78f7bac6f96769d0bb1836c1e4"}
Jan 28 17:06:29 crc kubenswrapper[4877]: I0128 17:06:29.520785 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b59bf0d834ce591487f9fc0a44b36ed72b859d78f7bac6f96769d0bb1836c1e4"
Jan 28 17:06:29 crc kubenswrapper[4877]: I0128 17:06:29.520840 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-t55qb"
Jan 28 17:06:29 crc kubenswrapper[4877]: I0128 17:06:29.636679 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-wxcqh" podUID="dd412c38-2df2-4881-8293-4866583158c8" containerName="registry-server" probeResult="failure" output=<
Jan 28 17:06:29 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 17:06:29 crc kubenswrapper[4877]: >
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.545662 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-67779f5b76-zbfrz"]
Jan 28 17:06:30 crc kubenswrapper[4877]: E0128 17:06:30.546623 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b5478d7-63fb-41e2-89c8-1d27290a9844" containerName="heat-db-sync"
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.546638 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b5478d7-63fb-41e2-89c8-1d27290a9844" containerName="heat-db-sync"
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.546901 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b5478d7-63fb-41e2-89c8-1d27290a9844" containerName="heat-db-sync"
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.547914 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-67779f5b76-zbfrz"
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.566610 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-67779f5b76-zbfrz"]
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.634397 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-5dc759f499-fhs49"]
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.636622 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5dc759f499-fhs49"
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.663821 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-fb57644db-zwfdw"]
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.666141 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-fb57644db-zwfdw"
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.683220 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0fcf8ff-605f-4717-b909-75253469f5b2-config-data\") pod \"heat-engine-67779f5b76-zbfrz\" (UID: \"a0fcf8ff-605f-4717-b909-75253469f5b2\") " pod="openstack/heat-engine-67779f5b76-zbfrz"
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.691799 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0fcf8ff-605f-4717-b909-75253469f5b2-config-data-custom\") pod \"heat-engine-67779f5b76-zbfrz\" (UID: \"a0fcf8ff-605f-4717-b909-75253469f5b2\") " pod="openstack/heat-engine-67779f5b76-zbfrz"
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.692065 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbw2s\" (UniqueName: \"kubernetes.io/projected/a0fcf8ff-605f-4717-b909-75253469f5b2-kube-api-access-zbw2s\") pod \"heat-engine-67779f5b76-zbfrz\" (UID: \"a0fcf8ff-605f-4717-b909-75253469f5b2\") " pod="openstack/heat-engine-67779f5b76-zbfrz"
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.692258 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0fcf8ff-605f-4717-b909-75253469f5b2-combined-ca-bundle\") pod \"heat-engine-67779f5b76-zbfrz\" (UID: \"a0fcf8ff-605f-4717-b909-75253469f5b2\") " pod="openstack/heat-engine-67779f5b76-zbfrz"
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.693629 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5dc759f499-fhs49"]
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.746764 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-fb57644db-zwfdw"]
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.805505 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6d901b8-6129-41e7-8099-466e2bd7a134-internal-tls-certs\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49"
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.805800 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-config-data\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw"
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.805984 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-combined-ca-bundle\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw"
Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.806075 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0fcf8ff-605f-4717-b909-75253469f5b2-config-data\") pod \"heat-engine-67779f5b76-zbfrz\" (UID: 
\"a0fcf8ff-605f-4717-b909-75253469f5b2\") " pod="openstack/heat-engine-67779f5b76-zbfrz" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.806165 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0fcf8ff-605f-4717-b909-75253469f5b2-config-data-custom\") pod \"heat-engine-67779f5b76-zbfrz\" (UID: \"a0fcf8ff-605f-4717-b909-75253469f5b2\") " pod="openstack/heat-engine-67779f5b76-zbfrz" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.806302 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-config-data-custom\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.806421 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f6d901b8-6129-41e7-8099-466e2bd7a134-config-data-custom\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.806558 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbw2s\" (UniqueName: \"kubernetes.io/projected/a0fcf8ff-605f-4717-b909-75253469f5b2-kube-api-access-zbw2s\") pod \"heat-engine-67779f5b76-zbfrz\" (UID: \"a0fcf8ff-605f-4717-b909-75253469f5b2\") " pod="openstack/heat-engine-67779f5b76-zbfrz" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.806678 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0fcf8ff-605f-4717-b909-75253469f5b2-combined-ca-bundle\") pod \"heat-engine-67779f5b76-zbfrz\" (UID: \"a0fcf8ff-605f-4717-b909-75253469f5b2\") " pod="openstack/heat-engine-67779f5b76-zbfrz" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.806759 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qgmjf\" (UniqueName: \"kubernetes.io/projected/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-kube-api-access-qgmjf\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.806848 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-internal-tls-certs\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.806952 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6d901b8-6129-41e7-8099-466e2bd7a134-config-data\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.807049 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/f6d901b8-6129-41e7-8099-466e2bd7a134-public-tls-certs\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.807137 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbn89\" (UniqueName: \"kubernetes.io/projected/f6d901b8-6129-41e7-8099-466e2bd7a134-kube-api-access-fbn89\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.807216 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6d901b8-6129-41e7-8099-466e2bd7a134-combined-ca-bundle\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.807306 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-public-tls-certs\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.813223 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0fcf8ff-605f-4717-b909-75253469f5b2-config-data\") pod \"heat-engine-67779f5b76-zbfrz\" (UID: \"a0fcf8ff-605f-4717-b909-75253469f5b2\") " pod="openstack/heat-engine-67779f5b76-zbfrz" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.814319 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0fcf8ff-605f-4717-b909-75253469f5b2-combined-ca-bundle\") pod \"heat-engine-67779f5b76-zbfrz\" (UID: \"a0fcf8ff-605f-4717-b909-75253469f5b2\") " pod="openstack/heat-engine-67779f5b76-zbfrz" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.814407 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0fcf8ff-605f-4717-b909-75253469f5b2-config-data-custom\") pod \"heat-engine-67779f5b76-zbfrz\" (UID: \"a0fcf8ff-605f-4717-b909-75253469f5b2\") " pod="openstack/heat-engine-67779f5b76-zbfrz" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.827191 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbw2s\" (UniqueName: \"kubernetes.io/projected/a0fcf8ff-605f-4717-b909-75253469f5b2-kube-api-access-zbw2s\") pod \"heat-engine-67779f5b76-zbfrz\" (UID: \"a0fcf8ff-605f-4717-b909-75253469f5b2\") " pod="openstack/heat-engine-67779f5b76-zbfrz" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.870937 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-67779f5b76-zbfrz" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.910062 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f6d901b8-6129-41e7-8099-466e2bd7a134-config-data-custom\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.910288 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qgmjf\" (UniqueName: \"kubernetes.io/projected/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-kube-api-access-qgmjf\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.910497 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-internal-tls-certs\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.910846 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6d901b8-6129-41e7-8099-466e2bd7a134-config-data\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.910995 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6d901b8-6129-41e7-8099-466e2bd7a134-public-tls-certs\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.911073 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbn89\" (UniqueName: \"kubernetes.io/projected/f6d901b8-6129-41e7-8099-466e2bd7a134-kube-api-access-fbn89\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.911109 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6d901b8-6129-41e7-8099-466e2bd7a134-combined-ca-bundle\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.911160 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-public-tls-certs\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.911211 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6d901b8-6129-41e7-8099-466e2bd7a134-internal-tls-certs\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " 
pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.911237 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-config-data\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.911517 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-combined-ca-bundle\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.911638 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-config-data-custom\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.917590 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f6d901b8-6129-41e7-8099-466e2bd7a134-config-data-custom\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.918503 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-internal-tls-certs\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.919407 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-config-data-custom\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.919976 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6d901b8-6129-41e7-8099-466e2bd7a134-config-data\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.921345 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6d901b8-6129-41e7-8099-466e2bd7a134-public-tls-certs\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.921840 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6d901b8-6129-41e7-8099-466e2bd7a134-internal-tls-certs\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.922557 4877 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-config-data\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.925380 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6d901b8-6129-41e7-8099-466e2bd7a134-combined-ca-bundle\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.928019 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-combined-ca-bundle\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.933904 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-public-tls-certs\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.934733 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbn89\" (UniqueName: \"kubernetes.io/projected/f6d901b8-6129-41e7-8099-466e2bd7a134-kube-api-access-fbn89\") pod \"heat-api-5dc759f499-fhs49\" (UID: \"f6d901b8-6129-41e7-8099-466e2bd7a134\") " pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:30 crc kubenswrapper[4877]: I0128 17:06:30.939902 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qgmjf\" (UniqueName: \"kubernetes.io/projected/49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe-kube-api-access-qgmjf\") pod \"heat-cfnapi-fb57644db-zwfdw\" (UID: \"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe\") " pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:31 crc kubenswrapper[4877]: I0128 17:06:31.016923 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:31 crc kubenswrapper[4877]: I0128 17:06:31.037200 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:31 crc kubenswrapper[4877]: I0128 17:06:31.462004 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-67779f5b76-zbfrz"] Jan 28 17:06:31 crc kubenswrapper[4877]: I0128 17:06:31.549132 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-67779f5b76-zbfrz" event={"ID":"a0fcf8ff-605f-4717-b909-75253469f5b2","Type":"ContainerStarted","Data":"b17a4ae2683433c178fd11cd901c4bb365aa1c7285e21ebc946ce2dd40e0fd6a"} Jan 28 17:06:31 crc kubenswrapper[4877]: I0128 17:06:31.683831 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-fb57644db-zwfdw"] Jan 28 17:06:31 crc kubenswrapper[4877]: I0128 17:06:31.715440 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5dc759f499-fhs49"] Jan 28 17:06:32 crc kubenswrapper[4877]: I0128 17:06:32.579802 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-fb57644db-zwfdw" event={"ID":"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe","Type":"ContainerStarted","Data":"bc77b9ed4e8dadc611b8756b5b708f0a253850cc48433d0b6d5d9dad7c4daf39"} Jan 28 17:06:32 crc kubenswrapper[4877]: I0128 17:06:32.582247 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-67779f5b76-zbfrz" event={"ID":"a0fcf8ff-605f-4717-b909-75253469f5b2","Type":"ContainerStarted","Data":"d68a0217396f95c2e51c5172aa7e6a1428e6a51a016415cbac5551771923d50c"} Jan 28 17:06:32 crc kubenswrapper[4877]: I0128 17:06:32.582403 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-67779f5b76-zbfrz" Jan 28 17:06:32 crc kubenswrapper[4877]: I0128 17:06:32.584222 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5dc759f499-fhs49" event={"ID":"f6d901b8-6129-41e7-8099-466e2bd7a134","Type":"ContainerStarted","Data":"390c203db75bedf414a1316369e9bafaf879df5516d715fd838ed726dbf416b8"} Jan 28 17:06:32 crc kubenswrapper[4877]: I0128 17:06:32.607304 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-67779f5b76-zbfrz" podStartSLOduration=2.607280035 podStartE2EDuration="2.607280035s" podCreationTimestamp="2026-01-28 17:06:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:06:32.600589585 +0000 UTC m=+1896.158916483" watchObservedRunningTime="2026-01-28 17:06:32.607280035 +0000 UTC m=+1896.165606923" Jan 28 17:06:34 crc kubenswrapper[4877]: I0128 17:06:34.616333 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-fb57644db-zwfdw" event={"ID":"49d9f4b4-ba8a-493f-bd07-d0bc5ead09fe","Type":"ContainerStarted","Data":"505137c98a863988eb4e23b5f5df7b6cc8c9cb0f491f4522ab3f868af8e8ddf3"} Jan 28 17:06:34 crc kubenswrapper[4877]: I0128 17:06:34.616939 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:34 crc kubenswrapper[4877]: I0128 17:06:34.617908 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5dc759f499-fhs49" event={"ID":"f6d901b8-6129-41e7-8099-466e2bd7a134","Type":"ContainerStarted","Data":"4292c1e6c554a0d9bc7c47bc603e5a80e44c26dca291e9aa701830118069fdba"} Jan 28 17:06:34 crc kubenswrapper[4877]: I0128 17:06:34.618884 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:34 
crc kubenswrapper[4877]: I0128 17:06:34.657717 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-5dc759f499-fhs49" podStartSLOduration=2.919918084 podStartE2EDuration="4.657693739s" podCreationTimestamp="2026-01-28 17:06:30 +0000 UTC" firstStartedPulling="2026-01-28 17:06:31.692870002 +0000 UTC m=+1895.251196890" lastFinishedPulling="2026-01-28 17:06:33.430645657 +0000 UTC m=+1896.988972545" observedRunningTime="2026-01-28 17:06:34.637497025 +0000 UTC m=+1898.195823923" watchObservedRunningTime="2026-01-28 17:06:34.657693739 +0000 UTC m=+1898.216020627" Jan 28 17:06:34 crc kubenswrapper[4877]: I0128 17:06:34.667189 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-fb57644db-zwfdw" podStartSLOduration=2.930604522 podStartE2EDuration="4.667170024s" podCreationTimestamp="2026-01-28 17:06:30 +0000 UTC" firstStartedPulling="2026-01-28 17:06:31.695609066 +0000 UTC m=+1895.253935954" lastFinishedPulling="2026-01-28 17:06:33.432174568 +0000 UTC m=+1896.990501456" observedRunningTime="2026-01-28 17:06:34.660830394 +0000 UTC m=+1898.219157272" watchObservedRunningTime="2026-01-28 17:06:34.667170024 +0000 UTC m=+1898.225496912" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.050642 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-68df85789f-zh95j" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.145340 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-87n8s"] Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.145621 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" podUID="f0f7ed22-d3da-47ce-b61c-53a0b7a878e1" containerName="dnsmasq-dns" containerID="cri-o://e11d2af44f807b9e51bae7e84ac049753035ecf68bd26cd8546332748c71bc3d" gracePeriod=10 Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.398695 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-5kvgx"] Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.407001 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.463979 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-5kvgx"] Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.565107 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.565185 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-dns-svc\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.565245 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-config\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.565337 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.565405 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.565554 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.565667 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxdlf\" (UniqueName: \"kubernetes.io/projected/0d1bf829-7b27-4b01-b680-4ef76563912d-kube-api-access-qxdlf\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.636635 4877 generic.go:334] "Generic (PLEG): container finished" podID="f0f7ed22-d3da-47ce-b61c-53a0b7a878e1" containerID="e11d2af44f807b9e51bae7e84ac049753035ecf68bd26cd8546332748c71bc3d" exitCode=0 Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.636749 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" 
event={"ID":"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1","Type":"ContainerDied","Data":"e11d2af44f807b9e51bae7e84ac049753035ecf68bd26cd8546332748c71bc3d"} Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.667870 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-dns-svc\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.667958 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-config\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.668235 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.668317 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.668381 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.668424 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxdlf\" (UniqueName: \"kubernetes.io/projected/0d1bf829-7b27-4b01-b680-4ef76563912d-kube-api-access-qxdlf\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.668590 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.668927 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-dns-svc\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.669020 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-config\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " 
pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.669192 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.672144 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.672690 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.673025 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0d1bf829-7b27-4b01-b680-4ef76563912d-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.696134 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxdlf\" (UniqueName: \"kubernetes.io/projected/0d1bf829-7b27-4b01-b680-4ef76563912d-kube-api-access-qxdlf\") pod \"dnsmasq-dns-bb85b8995-5kvgx\" (UID: \"0d1bf829-7b27-4b01-b680-4ef76563912d\") " pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.738032 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.968088 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.992326 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-dns-swift-storage-0\") pod \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.992394 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-ovsdbserver-sb\") pod \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.992432 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-ovsdbserver-nb\") pod \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.992520 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-config\") pod \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.992560 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-dns-svc\") pod \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " Jan 28 17:06:35 crc kubenswrapper[4877]: I0128 17:06:35.992669 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rprxh\" (UniqueName: \"kubernetes.io/projected/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-kube-api-access-rprxh\") pod \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\" (UID: \"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1\") " Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:35.998711 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-kube-api-access-rprxh" (OuterVolumeSpecName: "kube-api-access-rprxh") pod "f0f7ed22-d3da-47ce-b61c-53a0b7a878e1" (UID: "f0f7ed22-d3da-47ce-b61c-53a0b7a878e1"). InnerVolumeSpecName "kube-api-access-rprxh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.109345 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rprxh\" (UniqueName: \"kubernetes.io/projected/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-kube-api-access-rprxh\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.112527 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f0f7ed22-d3da-47ce-b61c-53a0b7a878e1" (UID: "f0f7ed22-d3da-47ce-b61c-53a0b7a878e1"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.115161 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-config" (OuterVolumeSpecName: "config") pod "f0f7ed22-d3da-47ce-b61c-53a0b7a878e1" (UID: "f0f7ed22-d3da-47ce-b61c-53a0b7a878e1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.123845 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f0f7ed22-d3da-47ce-b61c-53a0b7a878e1" (UID: "f0f7ed22-d3da-47ce-b61c-53a0b7a878e1"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.124727 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f0f7ed22-d3da-47ce-b61c-53a0b7a878e1" (UID: "f0f7ed22-d3da-47ce-b61c-53a0b7a878e1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.128360 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f0f7ed22-d3da-47ce-b61c-53a0b7a878e1" (UID: "f0f7ed22-d3da-47ce-b61c-53a0b7a878e1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.211644 4877 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.211940 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.211952 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.211964 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-config\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.211976 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:36 crc kubenswrapper[4877]: W0128 17:06:36.333777 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d1bf829_7b27_4b01_b680_4ef76563912d.slice/crio-f4d8fc65f019162f39faae64796928271be939e2b9de581935297adae8e33e20 WatchSource:0}: Error finding container f4d8fc65f019162f39faae64796928271be939e2b9de581935297adae8e33e20: Status 404 returned 
error can't find the container with id f4d8fc65f019162f39faae64796928271be939e2b9de581935297adae8e33e20 Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.335181 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-5kvgx"] Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.655252 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" event={"ID":"f0f7ed22-d3da-47ce-b61c-53a0b7a878e1","Type":"ContainerDied","Data":"51ad788d57f4ac4bdd2dbe60ef093822fec90714291413b224f111524db2a02c"} Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.655527 4877 scope.go:117] "RemoveContainer" containerID="e11d2af44f807b9e51bae7e84ac049753035ecf68bd26cd8546332748c71bc3d" Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.655682 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-87n8s" Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.660980 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" event={"ID":"0d1bf829-7b27-4b01-b680-4ef76563912d","Type":"ContainerStarted","Data":"0eb104895a6131f56373f04e7443d09c707d1c06cfdff0ece79e13ffac360f62"} Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.661017 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" event={"ID":"0d1bf829-7b27-4b01-b680-4ef76563912d","Type":"ContainerStarted","Data":"f4d8fc65f019162f39faae64796928271be939e2b9de581935297adae8e33e20"} Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.691492 4877 scope.go:117] "RemoveContainer" containerID="0b4579ff3b99fcbed0d1f86794cee9170c201767c7c903ee64e7aabc65f024ce" Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.725178 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-87n8s"] Jan 28 17:06:36 crc kubenswrapper[4877]: I0128 17:06:36.736533 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-87n8s"] Jan 28 17:06:37 crc kubenswrapper[4877]: I0128 17:06:37.248763 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fsrk2" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="registry-server" probeResult="failure" output=< Jan 28 17:06:37 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:06:37 crc kubenswrapper[4877]: > Jan 28 17:06:37 crc kubenswrapper[4877]: I0128 17:06:37.348082 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0f7ed22-d3da-47ce-b61c-53a0b7a878e1" path="/var/lib/kubelet/pods/f0f7ed22-d3da-47ce-b61c-53a0b7a878e1/volumes" Jan 28 17:06:37 crc kubenswrapper[4877]: I0128 17:06:37.676862 4877 generic.go:334] "Generic (PLEG): container finished" podID="0d1bf829-7b27-4b01-b680-4ef76563912d" containerID="0eb104895a6131f56373f04e7443d09c707d1c06cfdff0ece79e13ffac360f62" exitCode=0 Jan 28 17:06:37 crc kubenswrapper[4877]: I0128 17:06:37.676905 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" event={"ID":"0d1bf829-7b27-4b01-b680-4ef76563912d","Type":"ContainerDied","Data":"0eb104895a6131f56373f04e7443d09c707d1c06cfdff0ece79e13ffac360f62"} Jan 28 17:06:38 crc kubenswrapper[4877]: I0128 17:06:38.626062 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wxcqh" Jan 28 17:06:38 crc kubenswrapper[4877]: I0128 
17:06:38.686446 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wxcqh" Jan 28 17:06:38 crc kubenswrapper[4877]: I0128 17:06:38.694419 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" event={"ID":"0d1bf829-7b27-4b01-b680-4ef76563912d","Type":"ContainerStarted","Data":"886d50415b9a1d0bf6f613b5ef6e277888da2c64f2ab2b06dc2c805aa1c3718b"} Jan 28 17:06:38 crc kubenswrapper[4877]: I0128 17:06:38.746693 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" podStartSLOduration=3.746674067 podStartE2EDuration="3.746674067s" podCreationTimestamp="2026-01-28 17:06:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:06:38.742981447 +0000 UTC m=+1902.301308365" watchObservedRunningTime="2026-01-28 17:06:38.746674067 +0000 UTC m=+1902.305001015" Jan 28 17:06:39 crc kubenswrapper[4877]: I0128 17:06:39.355614 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 28 17:06:39 crc kubenswrapper[4877]: I0128 17:06:39.446797 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wxcqh"] Jan 28 17:06:39 crc kubenswrapper[4877]: I0128 17:06:39.704275 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:39 crc kubenswrapper[4877]: I0128 17:06:39.704258 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wxcqh" podUID="dd412c38-2df2-4881-8293-4866583158c8" containerName="registry-server" containerID="cri-o://17b32d0f267b94e372948fa9ad6972a0bf04e267d8043f3f5963a2031b080b85" gracePeriod=2 Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.361139 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wxcqh" Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.515703 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd412c38-2df2-4881-8293-4866583158c8-utilities\") pod \"dd412c38-2df2-4881-8293-4866583158c8\" (UID: \"dd412c38-2df2-4881-8293-4866583158c8\") " Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.515894 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd412c38-2df2-4881-8293-4866583158c8-catalog-content\") pod \"dd412c38-2df2-4881-8293-4866583158c8\" (UID: \"dd412c38-2df2-4881-8293-4866583158c8\") " Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.516212 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ss6p5\" (UniqueName: \"kubernetes.io/projected/dd412c38-2df2-4881-8293-4866583158c8-kube-api-access-ss6p5\") pod \"dd412c38-2df2-4881-8293-4866583158c8\" (UID: \"dd412c38-2df2-4881-8293-4866583158c8\") " Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.516573 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd412c38-2df2-4881-8293-4866583158c8-utilities" (OuterVolumeSpecName: "utilities") pod "dd412c38-2df2-4881-8293-4866583158c8" (UID: "dd412c38-2df2-4881-8293-4866583158c8"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.517819 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd412c38-2df2-4881-8293-4866583158c8-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.523794 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd412c38-2df2-4881-8293-4866583158c8-kube-api-access-ss6p5" (OuterVolumeSpecName: "kube-api-access-ss6p5") pod "dd412c38-2df2-4881-8293-4866583158c8" (UID: "dd412c38-2df2-4881-8293-4866583158c8"). InnerVolumeSpecName "kube-api-access-ss6p5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.569923 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd412c38-2df2-4881-8293-4866583158c8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dd412c38-2df2-4881-8293-4866583158c8" (UID: "dd412c38-2df2-4881-8293-4866583158c8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.620320 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ss6p5\" (UniqueName: \"kubernetes.io/projected/dd412c38-2df2-4881-8293-4866583158c8-kube-api-access-ss6p5\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.620403 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd412c38-2df2-4881-8293-4866583158c8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.721128 4877 generic.go:334] "Generic (PLEG): container finished" podID="dd412c38-2df2-4881-8293-4866583158c8" containerID="17b32d0f267b94e372948fa9ad6972a0bf04e267d8043f3f5963a2031b080b85" exitCode=0 Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.721183 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxcqh" event={"ID":"dd412c38-2df2-4881-8293-4866583158c8","Type":"ContainerDied","Data":"17b32d0f267b94e372948fa9ad6972a0bf04e267d8043f3f5963a2031b080b85"} Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.721241 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wxcqh" Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.721259 4877 scope.go:117] "RemoveContainer" containerID="17b32d0f267b94e372948fa9ad6972a0bf04e267d8043f3f5963a2031b080b85" Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.721246 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxcqh" event={"ID":"dd412c38-2df2-4881-8293-4866583158c8","Type":"ContainerDied","Data":"80f6b3fabd7081f0eab8dcf583397ef377c566cb803d1d07c132ce9bdef6949e"} Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.768139 4877 scope.go:117] "RemoveContainer" containerID="024077c7f5f6c23ba15d071b3cfa4c8efba5c044dc78a056721d598ebe6335ee" Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.768039 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wxcqh"] Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.780655 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wxcqh"] Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.800954 4877 scope.go:117] "RemoveContainer" containerID="2c3c6004478354312f47a09f3b737bc4b48c3a48e4bd473269a3fe29cb695ebf" Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.869190 4877 scope.go:117] "RemoveContainer" containerID="17b32d0f267b94e372948fa9ad6972a0bf04e267d8043f3f5963a2031b080b85" Jan 28 17:06:40 crc kubenswrapper[4877]: E0128 17:06:40.869863 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17b32d0f267b94e372948fa9ad6972a0bf04e267d8043f3f5963a2031b080b85\": container with ID starting with 17b32d0f267b94e372948fa9ad6972a0bf04e267d8043f3f5963a2031b080b85 not found: ID does not exist" containerID="17b32d0f267b94e372948fa9ad6972a0bf04e267d8043f3f5963a2031b080b85" Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.869916 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17b32d0f267b94e372948fa9ad6972a0bf04e267d8043f3f5963a2031b080b85"} err="failed to get container status \"17b32d0f267b94e372948fa9ad6972a0bf04e267d8043f3f5963a2031b080b85\": rpc error: code = NotFound desc = could not find container \"17b32d0f267b94e372948fa9ad6972a0bf04e267d8043f3f5963a2031b080b85\": container with ID starting with 17b32d0f267b94e372948fa9ad6972a0bf04e267d8043f3f5963a2031b080b85 not found: ID does not exist" Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.869949 4877 scope.go:117] "RemoveContainer" containerID="024077c7f5f6c23ba15d071b3cfa4c8efba5c044dc78a056721d598ebe6335ee" Jan 28 17:06:40 crc kubenswrapper[4877]: E0128 17:06:40.870314 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"024077c7f5f6c23ba15d071b3cfa4c8efba5c044dc78a056721d598ebe6335ee\": container with ID starting with 024077c7f5f6c23ba15d071b3cfa4c8efba5c044dc78a056721d598ebe6335ee not found: ID does not exist" containerID="024077c7f5f6c23ba15d071b3cfa4c8efba5c044dc78a056721d598ebe6335ee" Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.870350 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"024077c7f5f6c23ba15d071b3cfa4c8efba5c044dc78a056721d598ebe6335ee"} err="failed to get container status \"024077c7f5f6c23ba15d071b3cfa4c8efba5c044dc78a056721d598ebe6335ee\": rpc error: code = NotFound desc = could not find 
container \"024077c7f5f6c23ba15d071b3cfa4c8efba5c044dc78a056721d598ebe6335ee\": container with ID starting with 024077c7f5f6c23ba15d071b3cfa4c8efba5c044dc78a056721d598ebe6335ee not found: ID does not exist" Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.870373 4877 scope.go:117] "RemoveContainer" containerID="2c3c6004478354312f47a09f3b737bc4b48c3a48e4bd473269a3fe29cb695ebf" Jan 28 17:06:40 crc kubenswrapper[4877]: E0128 17:06:40.870615 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c3c6004478354312f47a09f3b737bc4b48c3a48e4bd473269a3fe29cb695ebf\": container with ID starting with 2c3c6004478354312f47a09f3b737bc4b48c3a48e4bd473269a3fe29cb695ebf not found: ID does not exist" containerID="2c3c6004478354312f47a09f3b737bc4b48c3a48e4bd473269a3fe29cb695ebf" Jan 28 17:06:40 crc kubenswrapper[4877]: I0128 17:06:40.870634 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c3c6004478354312f47a09f3b737bc4b48c3a48e4bd473269a3fe29cb695ebf"} err="failed to get container status \"2c3c6004478354312f47a09f3b737bc4b48c3a48e4bd473269a3fe29cb695ebf\": rpc error: code = NotFound desc = could not find container \"2c3c6004478354312f47a09f3b737bc4b48c3a48e4bd473269a3fe29cb695ebf\": container with ID starting with 2c3c6004478354312f47a09f3b737bc4b48c3a48e4bd473269a3fe29cb695ebf not found: ID does not exist" Jan 28 17:06:41 crc kubenswrapper[4877]: I0128 17:06:41.348592 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd412c38-2df2-4881-8293-4866583158c8" path="/var/lib/kubelet/pods/dd412c38-2df2-4881-8293-4866583158c8/volumes" Jan 28 17:06:43 crc kubenswrapper[4877]: I0128 17:06:43.210453 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-5dc759f499-fhs49" Jan 28 17:06:43 crc kubenswrapper[4877]: I0128 17:06:43.211108 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-fb57644db-zwfdw" Jan 28 17:06:43 crc kubenswrapper[4877]: I0128 17:06:43.316706 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-66df9b7b7d-nkmrc"] Jan 28 17:06:43 crc kubenswrapper[4877]: I0128 17:06:43.317254 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-66df9b7b7d-nkmrc" podUID="c6e0515b-5a47-473c-859a-8cbc2f02d959" containerName="heat-api" containerID="cri-o://c81ae7b288101b7675dca1b535598910208cb29eacef6a65a7cfd6460ff87669" gracePeriod=60 Jan 28 17:06:43 crc kubenswrapper[4877]: I0128 17:06:43.368971 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-759d866587-ps7h5"] Jan 28 17:06:43 crc kubenswrapper[4877]: I0128 17:06:43.374118 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-759d866587-ps7h5" podUID="8dac0c96-51a4-47f1-9f12-b1e35b7b8deb" containerName="heat-cfnapi" containerID="cri-o://f92c9d65a444b3b7245b0b3547044751868343c3e441bcaec87c5ba38f246655" gracePeriod=60 Jan 28 17:06:45 crc kubenswrapper[4877]: I0128 17:06:45.739743 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bb85b8995-5kvgx" Jan 28 17:06:45 crc kubenswrapper[4877]: I0128 17:06:45.878571 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-zh95j"] Jan 28 17:06:45 crc kubenswrapper[4877]: I0128 17:06:45.878826 4877 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openstack/dnsmasq-dns-68df85789f-zh95j" podUID="78b79384-f7b1-4d7d-b63e-aa72d31db598" containerName="dnsmasq-dns" containerID="cri-o://9a7f99ee6b3a4c600dc4b19e54c2085465a52b84239f26d15eb3f91996c73546" gracePeriod=10 Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.553013 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-759d866587-ps7h5" podUID="8dac0c96-51a4-47f1-9f12-b1e35b7b8deb" containerName="heat-cfnapi" probeResult="failure" output="Get \"https://10.217.0.228:8000/healthcheck\": read tcp 10.217.0.2:60250->10.217.0.228:8000: read: connection reset by peer" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.577975 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-zh95j" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.587834 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-ovsdbserver-sb\") pod \"78b79384-f7b1-4d7d-b63e-aa72d31db598\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.587956 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-dns-svc\") pod \"78b79384-f7b1-4d7d-b63e-aa72d31db598\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.588107 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dk4rl\" (UniqueName: \"kubernetes.io/projected/78b79384-f7b1-4d7d-b63e-aa72d31db598-kube-api-access-dk4rl\") pod \"78b79384-f7b1-4d7d-b63e-aa72d31db598\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.588171 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-dns-swift-storage-0\") pod \"78b79384-f7b1-4d7d-b63e-aa72d31db598\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.588239 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-ovsdbserver-nb\") pod \"78b79384-f7b1-4d7d-b63e-aa72d31db598\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.588328 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-config\") pod \"78b79384-f7b1-4d7d-b63e-aa72d31db598\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.588406 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-openstack-edpm-ipam\") pod \"78b79384-f7b1-4d7d-b63e-aa72d31db598\" (UID: \"78b79384-f7b1-4d7d-b63e-aa72d31db598\") " Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.603527 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78b79384-f7b1-4d7d-b63e-aa72d31db598-kube-api-access-dk4rl" (OuterVolumeSpecName: 
"kube-api-access-dk4rl") pod "78b79384-f7b1-4d7d-b63e-aa72d31db598" (UID: "78b79384-f7b1-4d7d-b63e-aa72d31db598"). InnerVolumeSpecName "kube-api-access-dk4rl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.692647 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dk4rl\" (UniqueName: \"kubernetes.io/projected/78b79384-f7b1-4d7d-b63e-aa72d31db598-kube-api-access-dk4rl\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.708290 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-config" (OuterVolumeSpecName: "config") pod "78b79384-f7b1-4d7d-b63e-aa72d31db598" (UID: "78b79384-f7b1-4d7d-b63e-aa72d31db598"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.727134 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "78b79384-f7b1-4d7d-b63e-aa72d31db598" (UID: "78b79384-f7b1-4d7d-b63e-aa72d31db598"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.742079 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "78b79384-f7b1-4d7d-b63e-aa72d31db598" (UID: "78b79384-f7b1-4d7d-b63e-aa72d31db598"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.768140 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "78b79384-f7b1-4d7d-b63e-aa72d31db598" (UID: "78b79384-f7b1-4d7d-b63e-aa72d31db598"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.776866 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "78b79384-f7b1-4d7d-b63e-aa72d31db598" (UID: "78b79384-f7b1-4d7d-b63e-aa72d31db598"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.791031 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "78b79384-f7b1-4d7d-b63e-aa72d31db598" (UID: "78b79384-f7b1-4d7d-b63e-aa72d31db598"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.796877 4877 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.796918 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.796930 4877 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-config\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.796940 4877 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.796949 4877 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.796957 4877 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78b79384-f7b1-4d7d-b63e-aa72d31db598-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.819891 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-66df9b7b7d-nkmrc" podUID="c6e0515b-5a47-473c-859a-8cbc2f02d959" containerName="heat-api" probeResult="failure" output="Get \"https://10.217.0.227:8004/healthcheck\": read tcp 10.217.0.2:57078->10.217.0.227:8004: read: connection reset by peer" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.861041 4877 generic.go:334] "Generic (PLEG): container finished" podID="1dcfedb4-c672-4dc9-86bd-340f07ccc805" containerID="8795659c2a5f8946b7c3f792710f2a4b3f9b26e5f945400f66c2acc15222cf58" exitCode=0 Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.861102 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"1dcfedb4-c672-4dc9-86bd-340f07ccc805","Type":"ContainerDied","Data":"8795659c2a5f8946b7c3f792710f2a4b3f9b26e5f945400f66c2acc15222cf58"} Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.867843 4877 generic.go:334] "Generic (PLEG): container finished" podID="8dac0c96-51a4-47f1-9f12-b1e35b7b8deb" containerID="f92c9d65a444b3b7245b0b3547044751868343c3e441bcaec87c5ba38f246655" exitCode=0 Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.867909 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-759d866587-ps7h5" event={"ID":"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb","Type":"ContainerDied","Data":"f92c9d65a444b3b7245b0b3547044751868343c3e441bcaec87c5ba38f246655"} Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.875273 4877 generic.go:334] "Generic (PLEG): container finished" podID="45c31f3a-8427-4523-b741-1b317afc8ee6" containerID="9e696eb29f3e812aa4889e66f6339912ad7f7667ea83ec13b180e999e77aab20" exitCode=0 Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.875356 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/rabbitmq-cell1-server-0" event={"ID":"45c31f3a-8427-4523-b741-1b317afc8ee6","Type":"ContainerDied","Data":"9e696eb29f3e812aa4889e66f6339912ad7f7667ea83ec13b180e999e77aab20"} Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.883860 4877 generic.go:334] "Generic (PLEG): container finished" podID="78b79384-f7b1-4d7d-b63e-aa72d31db598" containerID="9a7f99ee6b3a4c600dc4b19e54c2085465a52b84239f26d15eb3f91996c73546" exitCode=0 Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.883919 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-zh95j" event={"ID":"78b79384-f7b1-4d7d-b63e-aa72d31db598","Type":"ContainerDied","Data":"9a7f99ee6b3a4c600dc4b19e54c2085465a52b84239f26d15eb3f91996c73546"} Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.883950 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-zh95j" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.883983 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-zh95j" event={"ID":"78b79384-f7b1-4d7d-b63e-aa72d31db598","Type":"ContainerDied","Data":"3d824542a5ff44d8af89076ac8d1719d18bee59810e5e1118dcbeb913a46ab2a"} Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.884003 4877 scope.go:117] "RemoveContainer" containerID="9a7f99ee6b3a4c600dc4b19e54c2085465a52b84239f26d15eb3f91996c73546" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.949619 4877 scope.go:117] "RemoveContainer" containerID="cf28f934447194b3a0887a9a3f14f1092674c037710d82390626c311572f04e3" Jan 28 17:06:46 crc kubenswrapper[4877]: I0128 17:06:46.996381 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-zh95j"] Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.009039 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-zh95j"] Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.038524 4877 scope.go:117] "RemoveContainer" containerID="9a7f99ee6b3a4c600dc4b19e54c2085465a52b84239f26d15eb3f91996c73546" Jan 28 17:06:47 crc kubenswrapper[4877]: E0128 17:06:47.038911 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a7f99ee6b3a4c600dc4b19e54c2085465a52b84239f26d15eb3f91996c73546\": container with ID starting with 9a7f99ee6b3a4c600dc4b19e54c2085465a52b84239f26d15eb3f91996c73546 not found: ID does not exist" containerID="9a7f99ee6b3a4c600dc4b19e54c2085465a52b84239f26d15eb3f91996c73546" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.038942 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a7f99ee6b3a4c600dc4b19e54c2085465a52b84239f26d15eb3f91996c73546"} err="failed to get container status \"9a7f99ee6b3a4c600dc4b19e54c2085465a52b84239f26d15eb3f91996c73546\": rpc error: code = NotFound desc = could not find container \"9a7f99ee6b3a4c600dc4b19e54c2085465a52b84239f26d15eb3f91996c73546\": container with ID starting with 9a7f99ee6b3a4c600dc4b19e54c2085465a52b84239f26d15eb3f91996c73546 not found: ID does not exist" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.038963 4877 scope.go:117] "RemoveContainer" containerID="cf28f934447194b3a0887a9a3f14f1092674c037710d82390626c311572f04e3" Jan 28 17:06:47 crc kubenswrapper[4877]: E0128 17:06:47.039242 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"cf28f934447194b3a0887a9a3f14f1092674c037710d82390626c311572f04e3\": container with ID starting with cf28f934447194b3a0887a9a3f14f1092674c037710d82390626c311572f04e3 not found: ID does not exist" containerID="cf28f934447194b3a0887a9a3f14f1092674c037710d82390626c311572f04e3" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.039262 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf28f934447194b3a0887a9a3f14f1092674c037710d82390626c311572f04e3"} err="failed to get container status \"cf28f934447194b3a0887a9a3f14f1092674c037710d82390626c311572f04e3\": rpc error: code = NotFound desc = could not find container \"cf28f934447194b3a0887a9a3f14f1092674c037710d82390626c311572f04e3\": container with ID starting with cf28f934447194b3a0887a9a3f14f1092674c037710d82390626c311572f04e3 not found: ID does not exist" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.271955 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fsrk2" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="registry-server" probeResult="failure" output=< Jan 28 17:06:47 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:06:47 crc kubenswrapper[4877]: > Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.327915 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.372279 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78b79384-f7b1-4d7d-b63e-aa72d31db598" path="/var/lib/kubelet/pods/78b79384-f7b1-4d7d-b63e-aa72d31db598/volumes" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.429412 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-public-tls-certs\") pod \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.429894 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-config-data\") pod \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.429923 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-internal-tls-certs\") pod \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.429951 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66ghp\" (UniqueName: \"kubernetes.io/projected/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-kube-api-access-66ghp\") pod \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.430107 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-config-data-custom\") pod \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " Jan 28 17:06:47 crc kubenswrapper[4877]: 
I0128 17:06:47.430146 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-combined-ca-bundle\") pod \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\" (UID: \"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb\") " Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.439048 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-kube-api-access-66ghp" (OuterVolumeSpecName: "kube-api-access-66ghp") pod "8dac0c96-51a4-47f1-9f12-b1e35b7b8deb" (UID: "8dac0c96-51a4-47f1-9f12-b1e35b7b8deb"). InnerVolumeSpecName "kube-api-access-66ghp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.442035 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8dac0c96-51a4-47f1-9f12-b1e35b7b8deb" (UID: "8dac0c96-51a4-47f1-9f12-b1e35b7b8deb"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.487693 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8dac0c96-51a4-47f1-9f12-b1e35b7b8deb" (UID: "8dac0c96-51a4-47f1-9f12-b1e35b7b8deb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.504499 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "8dac0c96-51a4-47f1-9f12-b1e35b7b8deb" (UID: "8dac0c96-51a4-47f1-9f12-b1e35b7b8deb"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.522397 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "8dac0c96-51a4-47f1-9f12-b1e35b7b8deb" (UID: "8dac0c96-51a4-47f1-9f12-b1e35b7b8deb"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.524521 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-config-data" (OuterVolumeSpecName: "config-data") pod "8dac0c96-51a4-47f1-9f12-b1e35b7b8deb" (UID: "8dac0c96-51a4-47f1-9f12-b1e35b7b8deb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.533719 4877 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.533843 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.533859 4877 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.533870 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.533883 4877 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.533921 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66ghp\" (UniqueName: \"kubernetes.io/projected/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb-kube-api-access-66ghp\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.833355 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.898150 4877 generic.go:334] "Generic (PLEG): container finished" podID="c6e0515b-5a47-473c-859a-8cbc2f02d959" containerID="c81ae7b288101b7675dca1b535598910208cb29eacef6a65a7cfd6460ff87669" exitCode=0 Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.898203 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-66df9b7b7d-nkmrc" event={"ID":"c6e0515b-5a47-473c-859a-8cbc2f02d959","Type":"ContainerDied","Data":"c81ae7b288101b7675dca1b535598910208cb29eacef6a65a7cfd6460ff87669"} Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.898237 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-66df9b7b7d-nkmrc" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.898251 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-66df9b7b7d-nkmrc" event={"ID":"c6e0515b-5a47-473c-859a-8cbc2f02d959","Type":"ContainerDied","Data":"a0c9028679cc2730e135fb337f6b72dac0ded0eec36425c946bdd4afea3d9fc5"} Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.898273 4877 scope.go:117] "RemoveContainer" containerID="c81ae7b288101b7675dca1b535598910208cb29eacef6a65a7cfd6460ff87669" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.900864 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"45c31f3a-8427-4523-b741-1b317afc8ee6","Type":"ContainerStarted","Data":"fcc4077ca0c09ac5ba9d51def716207e59078886766ffa5962b9a61cfbf830e4"} Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.901108 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.905944 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-2" event={"ID":"1dcfedb4-c672-4dc9-86bd-340f07ccc805","Type":"ContainerStarted","Data":"13832f79fc7ee131f2c7857dad9bad76807b38c3278702f1438ca2fd98a2addd"} Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.906149 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-2" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.909278 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-759d866587-ps7h5" event={"ID":"8dac0c96-51a4-47f1-9f12-b1e35b7b8deb","Type":"ContainerDied","Data":"9b7fdea0232027f4582133767532a274601356e3896bf47a343f5c3ed10193c2"} Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.909335 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-759d866587-ps7h5" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.931419 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=41.931397093 podStartE2EDuration="41.931397093s" podCreationTimestamp="2026-01-28 17:06:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:06:47.927400745 +0000 UTC m=+1911.485727653" watchObservedRunningTime="2026-01-28 17:06:47.931397093 +0000 UTC m=+1911.489723981" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.940178 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-config-data\") pod \"c6e0515b-5a47-473c-859a-8cbc2f02d959\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.940278 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-config-data-custom\") pod \"c6e0515b-5a47-473c-859a-8cbc2f02d959\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.940338 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-internal-tls-certs\") pod \"c6e0515b-5a47-473c-859a-8cbc2f02d959\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.940366 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mchb6\" (UniqueName: \"kubernetes.io/projected/c6e0515b-5a47-473c-859a-8cbc2f02d959-kube-api-access-mchb6\") pod \"c6e0515b-5a47-473c-859a-8cbc2f02d959\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.940427 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-public-tls-certs\") pod \"c6e0515b-5a47-473c-859a-8cbc2f02d959\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.940599 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-combined-ca-bundle\") pod \"c6e0515b-5a47-473c-859a-8cbc2f02d959\" (UID: \"c6e0515b-5a47-473c-859a-8cbc2f02d959\") " Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.941145 4877 scope.go:117] "RemoveContainer" containerID="c81ae7b288101b7675dca1b535598910208cb29eacef6a65a7cfd6460ff87669" Jan 28 17:06:47 crc kubenswrapper[4877]: E0128 17:06:47.942779 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c81ae7b288101b7675dca1b535598910208cb29eacef6a65a7cfd6460ff87669\": container with ID starting with c81ae7b288101b7675dca1b535598910208cb29eacef6a65a7cfd6460ff87669 not found: ID does not exist" containerID="c81ae7b288101b7675dca1b535598910208cb29eacef6a65a7cfd6460ff87669" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.942809 4877 pod_container_deletor.go:53] "DeleteContainer returned 
error" containerID={"Type":"cri-o","ID":"c81ae7b288101b7675dca1b535598910208cb29eacef6a65a7cfd6460ff87669"} err="failed to get container status \"c81ae7b288101b7675dca1b535598910208cb29eacef6a65a7cfd6460ff87669\": rpc error: code = NotFound desc = could not find container \"c81ae7b288101b7675dca1b535598910208cb29eacef6a65a7cfd6460ff87669\": container with ID starting with c81ae7b288101b7675dca1b535598910208cb29eacef6a65a7cfd6460ff87669 not found: ID does not exist" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.942838 4877 scope.go:117] "RemoveContainer" containerID="f92c9d65a444b3b7245b0b3547044751868343c3e441bcaec87c5ba38f246655" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.953777 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c6e0515b-5a47-473c-859a-8cbc2f02d959" (UID: "c6e0515b-5a47-473c-859a-8cbc2f02d959"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.953963 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6e0515b-5a47-473c-859a-8cbc2f02d959-kube-api-access-mchb6" (OuterVolumeSpecName: "kube-api-access-mchb6") pod "c6e0515b-5a47-473c-859a-8cbc2f02d959" (UID: "c6e0515b-5a47-473c-859a-8cbc2f02d959"). InnerVolumeSpecName "kube-api-access-mchb6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.956613 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-2" podStartSLOduration=41.956593641 podStartE2EDuration="41.956593641s" podCreationTimestamp="2026-01-28 17:06:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:06:47.952269015 +0000 UTC m=+1911.510595923" watchObservedRunningTime="2026-01-28 17:06:47.956593641 +0000 UTC m=+1911.514920529" Jan 28 17:06:47 crc kubenswrapper[4877]: I0128 17:06:47.990498 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-759d866587-ps7h5"] Jan 28 17:06:48 crc kubenswrapper[4877]: I0128 17:06:48.004738 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-759d866587-ps7h5"] Jan 28 17:06:48 crc kubenswrapper[4877]: I0128 17:06:48.021100 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c6e0515b-5a47-473c-859a-8cbc2f02d959" (UID: "c6e0515b-5a47-473c-859a-8cbc2f02d959"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:06:48 crc kubenswrapper[4877]: I0128 17:06:48.021804 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c6e0515b-5a47-473c-859a-8cbc2f02d959" (UID: "c6e0515b-5a47-473c-859a-8cbc2f02d959"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:06:48 crc kubenswrapper[4877]: I0128 17:06:48.029132 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-config-data" (OuterVolumeSpecName: "config-data") pod "c6e0515b-5a47-473c-859a-8cbc2f02d959" (UID: "c6e0515b-5a47-473c-859a-8cbc2f02d959"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:06:48 crc kubenswrapper[4877]: I0128 17:06:48.033850 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c6e0515b-5a47-473c-859a-8cbc2f02d959" (UID: "c6e0515b-5a47-473c-859a-8cbc2f02d959"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:06:48 crc kubenswrapper[4877]: I0128 17:06:48.045995 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:48 crc kubenswrapper[4877]: I0128 17:06:48.046025 4877 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:48 crc kubenswrapper[4877]: I0128 17:06:48.046035 4877 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:48 crc kubenswrapper[4877]: I0128 17:06:48.046044 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mchb6\" (UniqueName: \"kubernetes.io/projected/c6e0515b-5a47-473c-859a-8cbc2f02d959-kube-api-access-mchb6\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:48 crc kubenswrapper[4877]: I0128 17:06:48.046054 4877 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:48 crc kubenswrapper[4877]: I0128 17:06:48.046062 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6e0515b-5a47-473c-859a-8cbc2f02d959-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:06:48 crc kubenswrapper[4877]: I0128 17:06:48.257859 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-66df9b7b7d-nkmrc"] Jan 28 17:06:48 crc kubenswrapper[4877]: I0128 17:06:48.276679 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-66df9b7b7d-nkmrc"] Jan 28 17:06:48 crc kubenswrapper[4877]: E0128 17:06:48.282054 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc6e0515b_5a47_473c_859a_8cbc2f02d959.slice\": RecentStats: unable to find data in memory cache]" Jan 28 17:06:48 crc kubenswrapper[4877]: E0128 17:06:48.287741 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc6e0515b_5a47_473c_859a_8cbc2f02d959.slice/crio-a0c9028679cc2730e135fb337f6b72dac0ded0eec36425c946bdd4afea3d9fc5\": RecentStats: unable to find data in memory cache]" Jan 28 17:06:49 crc kubenswrapper[4877]: I0128 17:06:49.364521 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8dac0c96-51a4-47f1-9f12-b1e35b7b8deb" path="/var/lib/kubelet/pods/8dac0c96-51a4-47f1-9f12-b1e35b7b8deb/volumes" Jan 28 17:06:49 crc kubenswrapper[4877]: I0128 17:06:49.365562 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6e0515b-5a47-473c-859a-8cbc2f02d959" path="/var/lib/kubelet/pods/c6e0515b-5a47-473c-859a-8cbc2f02d959/volumes" Jan 28 17:06:50 crc kubenswrapper[4877]: I0128 17:06:50.923835 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-67779f5b76-zbfrz" Jan 28 17:06:50 crc kubenswrapper[4877]: I0128 17:06:50.975240 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-6d5767f7f9-8pfmz"] Jan 28 17:06:50 crc kubenswrapper[4877]: I0128 17:06:50.975460 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-6d5767f7f9-8pfmz" podUID="f8dea33a-3c8f-43eb-af20-df530ec7a89d" containerName="heat-engine" containerID="cri-o://ade098d032d08a508cf4dbcaeea95d338b9065d42a33b376c48a2146f81cd575" gracePeriod=60 Jan 28 17:06:51 crc kubenswrapper[4877]: E0128 17:06:51.751448 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ade098d032d08a508cf4dbcaeea95d338b9065d42a33b376c48a2146f81cd575" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 28 17:06:51 crc kubenswrapper[4877]: E0128 17:06:51.759623 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ade098d032d08a508cf4dbcaeea95d338b9065d42a33b376c48a2146f81cd575" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 28 17:06:51 crc kubenswrapper[4877]: E0128 17:06:51.761136 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ade098d032d08a508cf4dbcaeea95d338b9065d42a33b376c48a2146f81cd575" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 28 17:06:51 crc kubenswrapper[4877]: E0128 17:06:51.761234 4877 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-6d5767f7f9-8pfmz" podUID="f8dea33a-3c8f-43eb-af20-df530ec7a89d" containerName="heat-engine" Jan 28 17:06:57 crc kubenswrapper[4877]: I0128 17:06:57.245996 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fsrk2" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="registry-server" probeResult="failure" output=< Jan 28 17:06:57 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:06:57 crc kubenswrapper[4877]: > Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.244544 4877 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp"] Jan 28 17:07:00 crc kubenswrapper[4877]: E0128 17:07:00.245592 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd412c38-2df2-4881-8293-4866583158c8" containerName="registry-server" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.245611 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd412c38-2df2-4881-8293-4866583158c8" containerName="registry-server" Jan 28 17:07:00 crc kubenswrapper[4877]: E0128 17:07:00.245636 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dac0c96-51a4-47f1-9f12-b1e35b7b8deb" containerName="heat-cfnapi" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.245643 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dac0c96-51a4-47f1-9f12-b1e35b7b8deb" containerName="heat-cfnapi" Jan 28 17:07:00 crc kubenswrapper[4877]: E0128 17:07:00.245649 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0f7ed22-d3da-47ce-b61c-53a0b7a878e1" containerName="init" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.245654 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0f7ed22-d3da-47ce-b61c-53a0b7a878e1" containerName="init" Jan 28 17:07:00 crc kubenswrapper[4877]: E0128 17:07:00.245667 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6e0515b-5a47-473c-859a-8cbc2f02d959" containerName="heat-api" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.245672 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6e0515b-5a47-473c-859a-8cbc2f02d959" containerName="heat-api" Jan 28 17:07:00 crc kubenswrapper[4877]: E0128 17:07:00.245693 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd412c38-2df2-4881-8293-4866583158c8" containerName="extract-content" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.245699 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd412c38-2df2-4881-8293-4866583158c8" containerName="extract-content" Jan 28 17:07:00 crc kubenswrapper[4877]: E0128 17:07:00.245722 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78b79384-f7b1-4d7d-b63e-aa72d31db598" containerName="init" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.245728 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="78b79384-f7b1-4d7d-b63e-aa72d31db598" containerName="init" Jan 28 17:07:00 crc kubenswrapper[4877]: E0128 17:07:00.245744 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd412c38-2df2-4881-8293-4866583158c8" containerName="extract-utilities" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.245750 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd412c38-2df2-4881-8293-4866583158c8" containerName="extract-utilities" Jan 28 17:07:00 crc kubenswrapper[4877]: E0128 17:07:00.245764 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78b79384-f7b1-4d7d-b63e-aa72d31db598" containerName="dnsmasq-dns" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.245770 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="78b79384-f7b1-4d7d-b63e-aa72d31db598" containerName="dnsmasq-dns" Jan 28 17:07:00 crc kubenswrapper[4877]: E0128 17:07:00.245782 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0f7ed22-d3da-47ce-b61c-53a0b7a878e1" containerName="dnsmasq-dns" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.245788 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0f7ed22-d3da-47ce-b61c-53a0b7a878e1" 
containerName="dnsmasq-dns" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.246045 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6e0515b-5a47-473c-859a-8cbc2f02d959" containerName="heat-api" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.246067 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="78b79384-f7b1-4d7d-b63e-aa72d31db598" containerName="dnsmasq-dns" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.246080 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0f7ed22-d3da-47ce-b61c-53a0b7a878e1" containerName="dnsmasq-dns" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.246090 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd412c38-2df2-4881-8293-4866583158c8" containerName="registry-server" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.246107 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="8dac0c96-51a4-47f1-9f12-b1e35b7b8deb" containerName="heat-cfnapi" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.246947 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.249190 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.249350 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.261281 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.261575 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.288405 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp"] Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.387520 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp\" (UID: \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.388028 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v694g\" (UniqueName: \"kubernetes.io/projected/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-kube-api-access-v694g\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp\" (UID: \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.388190 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp\" (UID: \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" Jan 28 17:07:00 crc 
kubenswrapper[4877]: I0128 17:07:00.388502 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp\" (UID: \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.490341 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v694g\" (UniqueName: \"kubernetes.io/projected/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-kube-api-access-v694g\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp\" (UID: \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.490435 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp\" (UID: \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.490673 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp\" (UID: \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.490776 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp\" (UID: \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.497239 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp\" (UID: \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.497781 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp\" (UID: \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.500941 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp\" (UID: \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\") " 
pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.511121 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v694g\" (UniqueName: \"kubernetes.io/projected/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-kube-api-access-v694g\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp\" (UID: \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" Jan 28 17:07:00 crc kubenswrapper[4877]: I0128 17:07:00.579740 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" Jan 28 17:07:01 crc kubenswrapper[4877]: I0128 17:07:01.273649 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp"] Jan 28 17:07:01 crc kubenswrapper[4877]: W0128 17:07:01.275599 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2ce6077_c81f_45f8_beb5_99d0a4df9c40.slice/crio-f532892fbe8d1c570f9f81f324cbbc0dad82ad096088bec309414feae4f26855 WatchSource:0}: Error finding container f532892fbe8d1c570f9f81f324cbbc0dad82ad096088bec309414feae4f26855: Status 404 returned error can't find the container with id f532892fbe8d1c570f9f81f324cbbc0dad82ad096088bec309414feae4f26855 Jan 28 17:07:01 crc kubenswrapper[4877]: E0128 17:07:01.750262 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ade098d032d08a508cf4dbcaeea95d338b9065d42a33b376c48a2146f81cd575" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 28 17:07:01 crc kubenswrapper[4877]: E0128 17:07:01.752858 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ade098d032d08a508cf4dbcaeea95d338b9065d42a33b376c48a2146f81cd575" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 28 17:07:01 crc kubenswrapper[4877]: E0128 17:07:01.754778 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ade098d032d08a508cf4dbcaeea95d338b9065d42a33b376c48a2146f81cd575" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 28 17:07:01 crc kubenswrapper[4877]: E0128 17:07:01.754903 4877 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-6d5767f7f9-8pfmz" podUID="f8dea33a-3c8f-43eb-af20-df530ec7a89d" containerName="heat-engine" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.093444 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" event={"ID":"a2ce6077-c81f-45f8-beb5-99d0a4df9c40","Type":"ContainerStarted","Data":"f532892fbe8d1c570f9f81f324cbbc0dad82ad096088bec309414feae4f26855"} Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.097000 4877 generic.go:334] "Generic (PLEG): container finished" podID="f8dea33a-3c8f-43eb-af20-df530ec7a89d" 
containerID="ade098d032d08a508cf4dbcaeea95d338b9065d42a33b376c48a2146f81cd575" exitCode=0 Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.097175 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6d5767f7f9-8pfmz" event={"ID":"f8dea33a-3c8f-43eb-af20-df530ec7a89d","Type":"ContainerDied","Data":"ade098d032d08a508cf4dbcaeea95d338b9065d42a33b376c48a2146f81cd575"} Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.306576 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-79fbr"] Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.327269 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-79fbr" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.330170 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.335425 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-79fbr"] Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.442638 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.444547 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-config-data\") pod \"aodh-db-sync-79fbr\" (UID: \"3b6f7515-689a-4a2e-807d-dde5d82975a7\") " pod="openstack/aodh-db-sync-79fbr" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.444663 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfznv\" (UniqueName: \"kubernetes.io/projected/3b6f7515-689a-4a2e-807d-dde5d82975a7-kube-api-access-vfznv\") pod \"aodh-db-sync-79fbr\" (UID: \"3b6f7515-689a-4a2e-807d-dde5d82975a7\") " pod="openstack/aodh-db-sync-79fbr" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.445070 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-scripts\") pod \"aodh-db-sync-79fbr\" (UID: \"3b6f7515-689a-4a2e-807d-dde5d82975a7\") " pod="openstack/aodh-db-sync-79fbr" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.445291 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-combined-ca-bundle\") pod \"aodh-db-sync-79fbr\" (UID: \"3b6f7515-689a-4a2e-807d-dde5d82975a7\") " pod="openstack/aodh-db-sync-79fbr" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.546952 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-config-data\") pod \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\" (UID: \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\") " Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.547411 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-combined-ca-bundle\") pod \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\" (UID: \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\") " Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.547685 4877 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pbg7z\" (UniqueName: \"kubernetes.io/projected/f8dea33a-3c8f-43eb-af20-df530ec7a89d-kube-api-access-pbg7z\") pod \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\" (UID: \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\") " Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.547741 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-config-data-custom\") pod \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\" (UID: \"f8dea33a-3c8f-43eb-af20-df530ec7a89d\") " Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.548865 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfznv\" (UniqueName: \"kubernetes.io/projected/3b6f7515-689a-4a2e-807d-dde5d82975a7-kube-api-access-vfznv\") pod \"aodh-db-sync-79fbr\" (UID: \"3b6f7515-689a-4a2e-807d-dde5d82975a7\") " pod="openstack/aodh-db-sync-79fbr" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.549082 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-scripts\") pod \"aodh-db-sync-79fbr\" (UID: \"3b6f7515-689a-4a2e-807d-dde5d82975a7\") " pod="openstack/aodh-db-sync-79fbr" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.549278 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-combined-ca-bundle\") pod \"aodh-db-sync-79fbr\" (UID: \"3b6f7515-689a-4a2e-807d-dde5d82975a7\") " pod="openstack/aodh-db-sync-79fbr" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.549432 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-config-data\") pod \"aodh-db-sync-79fbr\" (UID: \"3b6f7515-689a-4a2e-807d-dde5d82975a7\") " pod="openstack/aodh-db-sync-79fbr" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.556826 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-scripts\") pod \"aodh-db-sync-79fbr\" (UID: \"3b6f7515-689a-4a2e-807d-dde5d82975a7\") " pod="openstack/aodh-db-sync-79fbr" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.557736 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-combined-ca-bundle\") pod \"aodh-db-sync-79fbr\" (UID: \"3b6f7515-689a-4a2e-807d-dde5d82975a7\") " pod="openstack/aodh-db-sync-79fbr" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.560735 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-config-data\") pod \"aodh-db-sync-79fbr\" (UID: \"3b6f7515-689a-4a2e-807d-dde5d82975a7\") " pod="openstack/aodh-db-sync-79fbr" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.570002 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8dea33a-3c8f-43eb-af20-df530ec7a89d-kube-api-access-pbg7z" (OuterVolumeSpecName: "kube-api-access-pbg7z") pod "f8dea33a-3c8f-43eb-af20-df530ec7a89d" (UID: "f8dea33a-3c8f-43eb-af20-df530ec7a89d"). 
InnerVolumeSpecName "kube-api-access-pbg7z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.573467 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f8dea33a-3c8f-43eb-af20-df530ec7a89d" (UID: "f8dea33a-3c8f-43eb-af20-df530ec7a89d"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.576069 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfznv\" (UniqueName: \"kubernetes.io/projected/3b6f7515-689a-4a2e-807d-dde5d82975a7-kube-api-access-vfznv\") pod \"aodh-db-sync-79fbr\" (UID: \"3b6f7515-689a-4a2e-807d-dde5d82975a7\") " pod="openstack/aodh-db-sync-79fbr" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.605677 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f8dea33a-3c8f-43eb-af20-df530ec7a89d" (UID: "f8dea33a-3c8f-43eb-af20-df530ec7a89d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.652032 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pbg7z\" (UniqueName: \"kubernetes.io/projected/f8dea33a-3c8f-43eb-af20-df530ec7a89d-kube-api-access-pbg7z\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.652160 4877 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.652175 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.654284 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-config-data" (OuterVolumeSpecName: "config-data") pod "f8dea33a-3c8f-43eb-af20-df530ec7a89d" (UID: "f8dea33a-3c8f-43eb-af20-df530ec7a89d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.755613 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8dea33a-3c8f-43eb-af20-df530ec7a89d-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:02 crc kubenswrapper[4877]: I0128 17:07:02.756042 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-79fbr" Jan 28 17:07:03 crc kubenswrapper[4877]: I0128 17:07:03.117380 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6d5767f7f9-8pfmz" event={"ID":"f8dea33a-3c8f-43eb-af20-df530ec7a89d","Type":"ContainerDied","Data":"cc3891ff96e99de23f3cdb47f29f5ca6b03a49e9e3450d7df4b9de5aae6ba84a"} Jan 28 17:07:03 crc kubenswrapper[4877]: I0128 17:07:03.117769 4877 scope.go:117] "RemoveContainer" containerID="ade098d032d08a508cf4dbcaeea95d338b9065d42a33b376c48a2146f81cd575" Jan 28 17:07:03 crc kubenswrapper[4877]: I0128 17:07:03.117518 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6d5767f7f9-8pfmz" Jan 28 17:07:03 crc kubenswrapper[4877]: I0128 17:07:03.238327 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-6d5767f7f9-8pfmz"] Jan 28 17:07:03 crc kubenswrapper[4877]: I0128 17:07:03.256501 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-6d5767f7f9-8pfmz"] Jan 28 17:07:03 crc kubenswrapper[4877]: I0128 17:07:03.349876 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8dea33a-3c8f-43eb-af20-df530ec7a89d" path="/var/lib/kubelet/pods/f8dea33a-3c8f-43eb-af20-df530ec7a89d/volumes" Jan 28 17:07:03 crc kubenswrapper[4877]: I0128 17:07:03.351336 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-79fbr"] Jan 28 17:07:03 crc kubenswrapper[4877]: W0128 17:07:03.359158 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3b6f7515_689a_4a2e_807d_dde5d82975a7.slice/crio-8c30505cdbae0fed9500f4a37228c641e83d3af7b4d3060a52247899ad349eef WatchSource:0}: Error finding container 8c30505cdbae0fed9500f4a37228c641e83d3af7b4d3060a52247899ad349eef: Status 404 returned error can't find the container with id 8c30505cdbae0fed9500f4a37228c641e83d3af7b4d3060a52247899ad349eef Jan 28 17:07:04 crc kubenswrapper[4877]: I0128 17:07:04.161780 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-79fbr" event={"ID":"3b6f7515-689a-4a2e-807d-dde5d82975a7","Type":"ContainerStarted","Data":"8c30505cdbae0fed9500f4a37228c641e83d3af7b4d3060a52247899ad349eef"} Jan 28 17:07:06 crc kubenswrapper[4877]: I0128 17:07:06.933801 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-2" podUID="1dcfedb4-c672-4dc9-86bd-340f07ccc805" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.1.13:5671: connect: connection refused" Jan 28 17:07:06 crc kubenswrapper[4877]: I0128 17:07:06.968634 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 28 17:07:07 crc kubenswrapper[4877]: I0128 17:07:07.254126 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fsrk2" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="registry-server" probeResult="failure" output=< Jan 28 17:07:07 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:07:07 crc kubenswrapper[4877]: > Jan 28 17:07:07 crc kubenswrapper[4877]: I0128 17:07:07.778892 4877 scope.go:117] "RemoveContainer" containerID="cbc204a5a57dcadecfb4b7b6c50f46294dcef52bc5d1dc252d23dc039d2fb078" Jan 28 17:07:09 crc kubenswrapper[4877]: I0128 17:07:09.278255 4877 scope.go:117] "RemoveContainer" 
containerID="91c27be8ad2a3cedce2f5a618a9a9bace58c38236fb7bf48451a18fba21e8ee1" Jan 28 17:07:16 crc kubenswrapper[4877]: I0128 17:07:16.935883 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-2" Jan 28 17:07:17 crc kubenswrapper[4877]: I0128 17:07:17.017860 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 28 17:07:17 crc kubenswrapper[4877]: I0128 17:07:17.239733 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fsrk2" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="registry-server" probeResult="failure" output=< Jan 28 17:07:17 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:07:17 crc kubenswrapper[4877]: > Jan 28 17:07:23 crc kubenswrapper[4877]: E0128 17:07:23.244729 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest" Jan 28 17:07:23 crc kubenswrapper[4877]: E0128 17:07:23.245316 4877 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 28 17:07:23 crc kubenswrapper[4877]: container &Container{Name:repo-setup-edpm-deployment-openstack-edpm-ipam,Image:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,Command:[],Args:[ansible-runner run /runner -p playbook.yaml -i repo-setup-edpm-deployment-openstack-edpm-ipam],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:ANSIBLE_VERBOSITY,Value:2,ValueFrom:nil,},EnvVar{Name:RUNNER_PLAYBOOK,Value: Jan 28 17:07:23 crc kubenswrapper[4877]: - hosts: all Jan 28 17:07:23 crc kubenswrapper[4877]: strategy: linear Jan 28 17:07:23 crc kubenswrapper[4877]: tasks: Jan 28 17:07:23 crc kubenswrapper[4877]: - name: Enable podified-repos Jan 28 17:07:23 crc kubenswrapper[4877]: become: true Jan 28 17:07:23 crc kubenswrapper[4877]: ansible.builtin.shell: | Jan 28 17:07:23 crc kubenswrapper[4877]: set -euxo pipefail Jan 28 17:07:23 crc kubenswrapper[4877]: pushd /var/tmp Jan 28 17:07:23 crc kubenswrapper[4877]: curl -sL https://github.com/openstack-k8s-operators/repo-setup/archive/refs/heads/main.tar.gz | tar -xz Jan 28 17:07:23 crc kubenswrapper[4877]: pushd repo-setup-main Jan 28 17:07:23 crc kubenswrapper[4877]: python3 -m venv ./venv Jan 28 17:07:23 crc kubenswrapper[4877]: PBR_VERSION=0.0.0 ./venv/bin/pip install ./ Jan 28 17:07:23 crc kubenswrapper[4877]: ./venv/bin/repo-setup current-podified -b antelope Jan 28 17:07:23 crc kubenswrapper[4877]: popd Jan 28 17:07:23 crc kubenswrapper[4877]: rm -rf repo-setup-main Jan 28 17:07:23 crc kubenswrapper[4877]: Jan 28 17:07:23 crc kubenswrapper[4877]: Jan 28 17:07:23 crc kubenswrapper[4877]: ,ValueFrom:nil,},EnvVar{Name:RUNNER_EXTRA_VARS,Value: Jan 28 17:07:23 crc kubenswrapper[4877]: edpm_override_hosts: openstack-edpm-ipam Jan 28 17:07:23 crc kubenswrapper[4877]: edpm_service_type: repo-setup Jan 28 17:07:23 crc kubenswrapper[4877]: Jan 28 17:07:23 crc kubenswrapper[4877]: Jan 28 17:07:23 crc kubenswrapper[4877]: 
,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:repo-setup-combined-ca-bundle,ReadOnly:false,MountPath:/var/lib/openstack/cacerts/repo-setup,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key-openstack-edpm-ipam,ReadOnly:false,MountPath:/runner/env/ssh_key/ssh_key_openstack-edpm-ipam,SubPath:ssh_key_openstack-edpm-ipam,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:inventory,ReadOnly:false,MountPath:/runner/inventory/hosts,SubPath:inventory,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v694g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:openstack-aee-default-env,},Optional:*true,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp_openstack(a2ce6077-c81f-45f8-beb5-99d0a4df9c40): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled Jan 28 17:07:23 crc kubenswrapper[4877]: > logger="UnhandledError" Jan 28 17:07:23 crc kubenswrapper[4877]: E0128 17:07:23.246890 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"repo-setup-edpm-deployment-openstack-edpm-ipam\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" podUID="a2ce6077-c81f-45f8-beb5-99d0a4df9c40" Jan 28 17:07:23 crc kubenswrapper[4877]: E0128 17:07:23.418679 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"repo-setup-edpm-deployment-openstack-edpm-ipam\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest\\\"\"" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" podUID="a2ce6077-c81f-45f8-beb5-99d0a4df9c40" Jan 28 17:07:23 crc kubenswrapper[4877]: E0128 17:07:23.968722 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-aodh-api:current-tested" Jan 28 17:07:23 crc kubenswrapper[4877]: E0128 17:07:23.969016 4877 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-aodh-api:current-tested" Jan 28 17:07:23 crc kubenswrapper[4877]: E0128 17:07:23.969127 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:aodh-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-aodh-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:AodhPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:AodhPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:aodh-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vfznv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42402,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod aodh-db-sync-79fbr_openstack(3b6f7515-689a-4a2e-807d-dde5d82975a7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 17:07:23 crc kubenswrapper[4877]: E0128 17:07:23.970179 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"aodh-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/aodh-db-sync-79fbr" podUID="3b6f7515-689a-4a2e-807d-dde5d82975a7" Jan 28 17:07:24 crc kubenswrapper[4877]: E0128 17:07:24.429357 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"aodh-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-aodh-api:current-tested\\\"\"" pod="openstack/aodh-db-sync-79fbr" podUID="3b6f7515-689a-4a2e-807d-dde5d82975a7" Jan 28 17:07:24 crc kubenswrapper[4877]: I0128 17:07:24.975353 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-1" podUID="2f642a61-430e-4dfc-b6b6-3ee68161eaf6" containerName="rabbitmq" containerID="cri-o://ed817e61aa6dcba55287d14e0f9a4cb6e5ab9f3012181d75a559e09e65498906" gracePeriod=604793 Jan 28 17:07:27 crc kubenswrapper[4877]: I0128 17:07:27.248277 4877 prober.go:107] "Probe 
failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fsrk2" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="registry-server" probeResult="failure" output=< Jan 28 17:07:27 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:07:27 crc kubenswrapper[4877]: > Jan 28 17:07:30 crc kubenswrapper[4877]: I0128 17:07:30.850818 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-1" podUID="2f642a61-430e-4dfc-b6b6-3ee68161eaf6" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused" Jan 28 17:07:31 crc kubenswrapper[4877]: I0128 17:07:31.960320 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.078826 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-server-conf\") pod \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.079088 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-confd\") pod \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.079135 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7wbrp\" (UniqueName: \"kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-kube-api-access-7wbrp\") pod \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.079266 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-config-data\") pod \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.079290 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-pod-info\") pod \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.080298 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35962983-c2f9-469f-a143-b7309d73e288\") pod \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.080357 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-plugins-conf\") pod \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.080399 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-erlang-cookie\") pod \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.080444 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-plugins\") pod \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.080495 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-erlang-cookie-secret\") pod \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.080544 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-tls\") pod \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\" (UID: \"2f642a61-430e-4dfc-b6b6-3ee68161eaf6\") " Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.083320 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "2f642a61-430e-4dfc-b6b6-3ee68161eaf6" (UID: "2f642a61-430e-4dfc-b6b6-3ee68161eaf6"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.086504 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "2f642a61-430e-4dfc-b6b6-3ee68161eaf6" (UID: "2f642a61-430e-4dfc-b6b6-3ee68161eaf6"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.088032 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "2f642a61-430e-4dfc-b6b6-3ee68161eaf6" (UID: "2f642a61-430e-4dfc-b6b6-3ee68161eaf6"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.093613 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-kube-api-access-7wbrp" (OuterVolumeSpecName: "kube-api-access-7wbrp") pod "2f642a61-430e-4dfc-b6b6-3ee68161eaf6" (UID: "2f642a61-430e-4dfc-b6b6-3ee68161eaf6"). InnerVolumeSpecName "kube-api-access-7wbrp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.094677 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "2f642a61-430e-4dfc-b6b6-3ee68161eaf6" (UID: "2f642a61-430e-4dfc-b6b6-3ee68161eaf6"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.095403 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "2f642a61-430e-4dfc-b6b6-3ee68161eaf6" (UID: "2f642a61-430e-4dfc-b6b6-3ee68161eaf6"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.112572 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-pod-info" (OuterVolumeSpecName: "pod-info") pod "2f642a61-430e-4dfc-b6b6-3ee68161eaf6" (UID: "2f642a61-430e-4dfc-b6b6-3ee68161eaf6"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.146894 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35962983-c2f9-469f-a143-b7309d73e288" (OuterVolumeSpecName: "persistence") pod "2f642a61-430e-4dfc-b6b6-3ee68161eaf6" (UID: "2f642a61-430e-4dfc-b6b6-3ee68161eaf6"). InnerVolumeSpecName "pvc-35962983-c2f9-469f-a143-b7309d73e288". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.172836 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-config-data" (OuterVolumeSpecName: "config-data") pod "2f642a61-430e-4dfc-b6b6-3ee68161eaf6" (UID: "2f642a61-430e-4dfc-b6b6-3ee68161eaf6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.186781 4877 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.186817 4877 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.186828 4877 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.186837 4877 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.186845 4877 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.186854 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7wbrp\" (UniqueName: \"kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-kube-api-access-7wbrp\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.186862 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.186870 4877 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-pod-info\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.186899 4877 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-35962983-c2f9-469f-a143-b7309d73e288\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35962983-c2f9-469f-a143-b7309d73e288\") on node \"crc\" " Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.251308 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-server-conf" (OuterVolumeSpecName: "server-conf") pod "2f642a61-430e-4dfc-b6b6-3ee68161eaf6" (UID: "2f642a61-430e-4dfc-b6b6-3ee68161eaf6"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.254185 4877 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.254376 4877 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-35962983-c2f9-469f-a143-b7309d73e288" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35962983-c2f9-469f-a143-b7309d73e288") on node "crc" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.289398 4877 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-server-conf\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.289436 4877 reconciler_common.go:293] "Volume detached for volume \"pvc-35962983-c2f9-469f-a143-b7309d73e288\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35962983-c2f9-469f-a143-b7309d73e288\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.321721 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "2f642a61-430e-4dfc-b6b6-3ee68161eaf6" (UID: "2f642a61-430e-4dfc-b6b6-3ee68161eaf6"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.393609 4877 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2f642a61-430e-4dfc-b6b6-3ee68161eaf6-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.518117 4877 generic.go:334] "Generic (PLEG): container finished" podID="2f642a61-430e-4dfc-b6b6-3ee68161eaf6" containerID="ed817e61aa6dcba55287d14e0f9a4cb6e5ab9f3012181d75a559e09e65498906" exitCode=0 Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.518175 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"2f642a61-430e-4dfc-b6b6-3ee68161eaf6","Type":"ContainerDied","Data":"ed817e61aa6dcba55287d14e0f9a4cb6e5ab9f3012181d75a559e09e65498906"} Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.518201 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"2f642a61-430e-4dfc-b6b6-3ee68161eaf6","Type":"ContainerDied","Data":"fc92e019da570fb319c5d8e9f56baa7afaa013a1f0778ac7d11e9ad9595d8042"} Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.518219 4877 scope.go:117] "RemoveContainer" containerID="ed817e61aa6dcba55287d14e0f9a4cb6e5ab9f3012181d75a559e09e65498906" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.518359 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.591016 4877 scope.go:117] "RemoveContainer" containerID="c54362878b34558cb31609a92d4a2216db79c8bb56753c66aa44c767d855a310" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.597979 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.612425 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.631320 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-1"] Jan 28 17:07:32 crc kubenswrapper[4877]: E0128 17:07:32.631993 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f642a61-430e-4dfc-b6b6-3ee68161eaf6" containerName="setup-container" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.632020 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f642a61-430e-4dfc-b6b6-3ee68161eaf6" containerName="setup-container" Jan 28 17:07:32 crc kubenswrapper[4877]: E0128 17:07:32.632035 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8dea33a-3c8f-43eb-af20-df530ec7a89d" containerName="heat-engine" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.632042 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8dea33a-3c8f-43eb-af20-df530ec7a89d" containerName="heat-engine" Jan 28 17:07:32 crc kubenswrapper[4877]: E0128 17:07:32.632069 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f642a61-430e-4dfc-b6b6-3ee68161eaf6" containerName="rabbitmq" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.632075 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f642a61-430e-4dfc-b6b6-3ee68161eaf6" containerName="rabbitmq" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.632293 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f642a61-430e-4dfc-b6b6-3ee68161eaf6" containerName="rabbitmq" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.632335 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8dea33a-3c8f-43eb-af20-df530ec7a89d" containerName="heat-engine" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.633753 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.646476 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.660117 4877 scope.go:117] "RemoveContainer" containerID="ed817e61aa6dcba55287d14e0f9a4cb6e5ab9f3012181d75a559e09e65498906" Jan 28 17:07:32 crc kubenswrapper[4877]: E0128 17:07:32.663632 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed817e61aa6dcba55287d14e0f9a4cb6e5ab9f3012181d75a559e09e65498906\": container with ID starting with ed817e61aa6dcba55287d14e0f9a4cb6e5ab9f3012181d75a559e09e65498906 not found: ID does not exist" containerID="ed817e61aa6dcba55287d14e0f9a4cb6e5ab9f3012181d75a559e09e65498906" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.664086 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed817e61aa6dcba55287d14e0f9a4cb6e5ab9f3012181d75a559e09e65498906"} err="failed to get container status \"ed817e61aa6dcba55287d14e0f9a4cb6e5ab9f3012181d75a559e09e65498906\": rpc error: code = NotFound desc = could not find container \"ed817e61aa6dcba55287d14e0f9a4cb6e5ab9f3012181d75a559e09e65498906\": container with ID starting with ed817e61aa6dcba55287d14e0f9a4cb6e5ab9f3012181d75a559e09e65498906 not found: ID does not exist" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.664174 4877 scope.go:117] "RemoveContainer" containerID="c54362878b34558cb31609a92d4a2216db79c8bb56753c66aa44c767d855a310" Jan 28 17:07:32 crc kubenswrapper[4877]: E0128 17:07:32.664527 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c54362878b34558cb31609a92d4a2216db79c8bb56753c66aa44c767d855a310\": container with ID starting with c54362878b34558cb31609a92d4a2216db79c8bb56753c66aa44c767d855a310 not found: ID does not exist" containerID="c54362878b34558cb31609a92d4a2216db79c8bb56753c66aa44c767d855a310" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.664560 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c54362878b34558cb31609a92d4a2216db79c8bb56753c66aa44c767d855a310"} err="failed to get container status \"c54362878b34558cb31609a92d4a2216db79c8bb56753c66aa44c767d855a310\": rpc error: code = NotFound desc = could not find container \"c54362878b34558cb31609a92d4a2216db79c8bb56753c66aa44c767d855a310\": container with ID starting with c54362878b34558cb31609a92d4a2216db79c8bb56753c66aa44c767d855a310 not found: ID does not exist" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.701755 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ffc790a6-8bf2-4088-8ec5-1720988944ae-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.702422 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ffc790a6-8bf2-4088-8ec5-1720988944ae-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.703007 4877 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ffc790a6-8bf2-4088-8ec5-1720988944ae-pod-info\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.703132 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ffc790a6-8bf2-4088-8ec5-1720988944ae-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.703278 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ffc790a6-8bf2-4088-8ec5-1720988944ae-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.703346 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ffc790a6-8bf2-4088-8ec5-1720988944ae-server-conf\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.703452 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ffc790a6-8bf2-4088-8ec5-1720988944ae-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.703536 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mn4n\" (UniqueName: \"kubernetes.io/projected/ffc790a6-8bf2-4088-8ec5-1720988944ae-kube-api-access-4mn4n\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.703588 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ffc790a6-8bf2-4088-8ec5-1720988944ae-config-data\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.703808 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-35962983-c2f9-469f-a143-b7309d73e288\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35962983-c2f9-469f-a143-b7309d73e288\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.703906 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ffc790a6-8bf2-4088-8ec5-1720988944ae-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.807456 4877 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ffc790a6-8bf2-4088-8ec5-1720988944ae-config-data\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.807583 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-35962983-c2f9-469f-a143-b7309d73e288\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35962983-c2f9-469f-a143-b7309d73e288\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.807629 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ffc790a6-8bf2-4088-8ec5-1720988944ae-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.807666 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ffc790a6-8bf2-4088-8ec5-1720988944ae-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.807743 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ffc790a6-8bf2-4088-8ec5-1720988944ae-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.807773 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ffc790a6-8bf2-4088-8ec5-1720988944ae-pod-info\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.807797 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ffc790a6-8bf2-4088-8ec5-1720988944ae-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.807882 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ffc790a6-8bf2-4088-8ec5-1720988944ae-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.807918 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ffc790a6-8bf2-4088-8ec5-1720988944ae-server-conf\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.808170 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ffc790a6-8bf2-4088-8ec5-1720988944ae-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: 
\"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.808201 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4mn4n\" (UniqueName: \"kubernetes.io/projected/ffc790a6-8bf2-4088-8ec5-1720988944ae-kube-api-access-4mn4n\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.809038 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ffc790a6-8bf2-4088-8ec5-1720988944ae-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.810218 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ffc790a6-8bf2-4088-8ec5-1720988944ae-config-data\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.810852 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ffc790a6-8bf2-4088-8ec5-1720988944ae-rabbitmq-plugins\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.812096 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ffc790a6-8bf2-4088-8ec5-1720988944ae-server-conf\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.812762 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.812880 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-35962983-c2f9-469f-a143-b7309d73e288\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35962983-c2f9-469f-a143-b7309d73e288\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9fcbc32b1e89d88c9c45f66191d0bf2f4114e554cfde863b999975859aec8c96/globalmount\"" pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.814611 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ffc790a6-8bf2-4088-8ec5-1720988944ae-pod-info\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.815270 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ffc790a6-8bf2-4088-8ec5-1720988944ae-plugins-conf\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.816533 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ffc790a6-8bf2-4088-8ec5-1720988944ae-erlang-cookie-secret\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.816724 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ffc790a6-8bf2-4088-8ec5-1720988944ae-rabbitmq-confd\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.817457 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ffc790a6-8bf2-4088-8ec5-1720988944ae-rabbitmq-tls\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.835599 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mn4n\" (UniqueName: \"kubernetes.io/projected/ffc790a6-8bf2-4088-8ec5-1720988944ae-kube-api-access-4mn4n\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.958767 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-35962983-c2f9-469f-a143-b7309d73e288\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35962983-c2f9-469f-a143-b7309d73e288\") pod \"rabbitmq-server-1\" (UID: \"ffc790a6-8bf2-4088-8ec5-1720988944ae\") " pod="openstack/rabbitmq-server-1" Jan 28 17:07:32 crc kubenswrapper[4877]: I0128 17:07:32.987763 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-1" Jan 28 17:07:33 crc kubenswrapper[4877]: I0128 17:07:33.348548 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f642a61-430e-4dfc-b6b6-3ee68161eaf6" path="/var/lib/kubelet/pods/2f642a61-430e-4dfc-b6b6-3ee68161eaf6/volumes" Jan 28 17:07:33 crc kubenswrapper[4877]: I0128 17:07:33.588907 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-1"] Jan 28 17:07:33 crc kubenswrapper[4877]: W0128 17:07:33.592380 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podffc790a6_8bf2_4088_8ec5_1720988944ae.slice/crio-da9f113c641c4a86fb6bccc6e34cab98351e2513e605989cd6627f6e16beb10c WatchSource:0}: Error finding container da9f113c641c4a86fb6bccc6e34cab98351e2513e605989cd6627f6e16beb10c: Status 404 returned error can't find the container with id da9f113c641c4a86fb6bccc6e34cab98351e2513e605989cd6627f6e16beb10c Jan 28 17:07:34 crc kubenswrapper[4877]: I0128 17:07:34.564359 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"ffc790a6-8bf2-4088-8ec5-1720988944ae","Type":"ContainerStarted","Data":"da9f113c641c4a86fb6bccc6e34cab98351e2513e605989cd6627f6e16beb10c"} Jan 28 17:07:36 crc kubenswrapper[4877]: I0128 17:07:36.591691 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-79fbr" event={"ID":"3b6f7515-689a-4a2e-807d-dde5d82975a7","Type":"ContainerStarted","Data":"75d6cd604269124281c2a85b0e721da2d26f8d9277a0181961032ca67f8a0ec5"} Jan 28 17:07:36 crc kubenswrapper[4877]: I0128 17:07:36.594331 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"ffc790a6-8bf2-4088-8ec5-1720988944ae","Type":"ContainerStarted","Data":"8e7fd88207cf2fc03e843580a2bc45136018689faa935ef818f3426d8a1618e8"} Jan 28 17:07:36 crc kubenswrapper[4877]: I0128 17:07:36.622109 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-79fbr" podStartSLOduration=2.391767745 podStartE2EDuration="34.622087361s" podCreationTimestamp="2026-01-28 17:07:02 +0000 UTC" firstStartedPulling="2026-01-28 17:07:03.369262972 +0000 UTC m=+1926.927589860" lastFinishedPulling="2026-01-28 17:07:35.599582588 +0000 UTC m=+1959.157909476" observedRunningTime="2026-01-28 17:07:36.608904476 +0000 UTC m=+1960.167231374" watchObservedRunningTime="2026-01-28 17:07:36.622087361 +0000 UTC m=+1960.180414249" Jan 28 17:07:37 crc kubenswrapper[4877]: I0128 17:07:37.257865 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fsrk2" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="registry-server" probeResult="failure" output=< Jan 28 17:07:37 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:07:37 crc kubenswrapper[4877]: > Jan 28 17:07:39 crc kubenswrapper[4877]: I0128 17:07:39.630917 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" event={"ID":"a2ce6077-c81f-45f8-beb5-99d0a4df9c40","Type":"ContainerStarted","Data":"ef2cc96ddb8d016396a2342a8aa7300f2d273d31b35d41e6e224ee9a3f28caf6"} Jan 28 17:07:39 crc kubenswrapper[4877]: I0128 17:07:39.663169 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" podStartSLOduration=1.93201678 
podStartE2EDuration="39.663145621s" podCreationTimestamp="2026-01-28 17:07:00 +0000 UTC" firstStartedPulling="2026-01-28 17:07:01.279467648 +0000 UTC m=+1924.837794536" lastFinishedPulling="2026-01-28 17:07:39.010596489 +0000 UTC m=+1962.568923377" observedRunningTime="2026-01-28 17:07:39.658901216 +0000 UTC m=+1963.217228114" watchObservedRunningTime="2026-01-28 17:07:39.663145621 +0000 UTC m=+1963.221472509" Jan 28 17:07:42 crc kubenswrapper[4877]: I0128 17:07:42.668736 4877 generic.go:334] "Generic (PLEG): container finished" podID="3b6f7515-689a-4a2e-807d-dde5d82975a7" containerID="75d6cd604269124281c2a85b0e721da2d26f8d9277a0181961032ca67f8a0ec5" exitCode=0 Jan 28 17:07:42 crc kubenswrapper[4877]: I0128 17:07:42.668817 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-79fbr" event={"ID":"3b6f7515-689a-4a2e-807d-dde5d82975a7","Type":"ContainerDied","Data":"75d6cd604269124281c2a85b0e721da2d26f8d9277a0181961032ca67f8a0ec5"} Jan 28 17:07:44 crc kubenswrapper[4877]: I0128 17:07:44.134705 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-79fbr" Jan 28 17:07:44 crc kubenswrapper[4877]: I0128 17:07:44.229004 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vfznv\" (UniqueName: \"kubernetes.io/projected/3b6f7515-689a-4a2e-807d-dde5d82975a7-kube-api-access-vfznv\") pod \"3b6f7515-689a-4a2e-807d-dde5d82975a7\" (UID: \"3b6f7515-689a-4a2e-807d-dde5d82975a7\") " Jan 28 17:07:44 crc kubenswrapper[4877]: I0128 17:07:44.229123 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-config-data\") pod \"3b6f7515-689a-4a2e-807d-dde5d82975a7\" (UID: \"3b6f7515-689a-4a2e-807d-dde5d82975a7\") " Jan 28 17:07:44 crc kubenswrapper[4877]: I0128 17:07:44.229271 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-scripts\") pod \"3b6f7515-689a-4a2e-807d-dde5d82975a7\" (UID: \"3b6f7515-689a-4a2e-807d-dde5d82975a7\") " Jan 28 17:07:44 crc kubenswrapper[4877]: I0128 17:07:44.229372 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-combined-ca-bundle\") pod \"3b6f7515-689a-4a2e-807d-dde5d82975a7\" (UID: \"3b6f7515-689a-4a2e-807d-dde5d82975a7\") " Jan 28 17:07:44 crc kubenswrapper[4877]: I0128 17:07:44.249648 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b6f7515-689a-4a2e-807d-dde5d82975a7-kube-api-access-vfznv" (OuterVolumeSpecName: "kube-api-access-vfznv") pod "3b6f7515-689a-4a2e-807d-dde5d82975a7" (UID: "3b6f7515-689a-4a2e-807d-dde5d82975a7"). InnerVolumeSpecName "kube-api-access-vfznv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:07:44 crc kubenswrapper[4877]: I0128 17:07:44.249900 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-scripts" (OuterVolumeSpecName: "scripts") pod "3b6f7515-689a-4a2e-807d-dde5d82975a7" (UID: "3b6f7515-689a-4a2e-807d-dde5d82975a7"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:07:44 crc kubenswrapper[4877]: I0128 17:07:44.274273 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3b6f7515-689a-4a2e-807d-dde5d82975a7" (UID: "3b6f7515-689a-4a2e-807d-dde5d82975a7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:07:44 crc kubenswrapper[4877]: I0128 17:07:44.276511 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-config-data" (OuterVolumeSpecName: "config-data") pod "3b6f7515-689a-4a2e-807d-dde5d82975a7" (UID: "3b6f7515-689a-4a2e-807d-dde5d82975a7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:07:44 crc kubenswrapper[4877]: I0128 17:07:44.331935 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vfznv\" (UniqueName: \"kubernetes.io/projected/3b6f7515-689a-4a2e-807d-dde5d82975a7-kube-api-access-vfznv\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:44 crc kubenswrapper[4877]: I0128 17:07:44.331969 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:44 crc kubenswrapper[4877]: I0128 17:07:44.331982 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:44 crc kubenswrapper[4877]: I0128 17:07:44.331993 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b6f7515-689a-4a2e-807d-dde5d82975a7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:44 crc kubenswrapper[4877]: I0128 17:07:44.694459 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-79fbr" event={"ID":"3b6f7515-689a-4a2e-807d-dde5d82975a7","Type":"ContainerDied","Data":"8c30505cdbae0fed9500f4a37228c641e83d3af7b4d3060a52247899ad349eef"} Jan 28 17:07:44 crc kubenswrapper[4877]: I0128 17:07:44.694520 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8c30505cdbae0fed9500f4a37228c641e83d3af7b4d3060a52247899ad349eef" Jan 28 17:07:44 crc kubenswrapper[4877]: I0128 17:07:44.694570 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-79fbr" Jan 28 17:07:47 crc kubenswrapper[4877]: I0128 17:07:47.250092 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fsrk2" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="registry-server" probeResult="failure" output=< Jan 28 17:07:47 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:07:47 crc kubenswrapper[4877]: > Jan 28 17:07:47 crc kubenswrapper[4877]: I0128 17:07:47.633654 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 28 17:07:47 crc kubenswrapper[4877]: I0128 17:07:47.633959 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerName="aodh-api" containerID="cri-o://cd874473116d0bd1dc0097fc84f4daca1a60090731d237396af4375863469715" gracePeriod=30 Jan 28 17:07:47 crc kubenswrapper[4877]: I0128 17:07:47.634006 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerName="aodh-listener" containerID="cri-o://548238c267d07348d74459eda816ce0054c894cdd1e34249ff66a7e0cc8ed27b" gracePeriod=30 Jan 28 17:07:47 crc kubenswrapper[4877]: I0128 17:07:47.634052 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerName="aodh-notifier" containerID="cri-o://1899d70b6e6172c561f0009e9985ce54d10522df458f604136ac84297362d6d6" gracePeriod=30 Jan 28 17:07:47 crc kubenswrapper[4877]: I0128 17:07:47.634079 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerName="aodh-evaluator" containerID="cri-o://24d25f7ff80e39c21a6b3a0ee610cb0ad7d33b64aa1c8f12399598f305447f71" gracePeriod=30 Jan 28 17:07:49 crc kubenswrapper[4877]: I0128 17:07:49.774345 4877 generic.go:334] "Generic (PLEG): container finished" podID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerID="24d25f7ff80e39c21a6b3a0ee610cb0ad7d33b64aa1c8f12399598f305447f71" exitCode=0 Jan 28 17:07:49 crc kubenswrapper[4877]: I0128 17:07:49.775510 4877 generic.go:334] "Generic (PLEG): container finished" podID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerID="cd874473116d0bd1dc0097fc84f4daca1a60090731d237396af4375863469715" exitCode=0 Jan 28 17:07:49 crc kubenswrapper[4877]: I0128 17:07:49.774413 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a","Type":"ContainerDied","Data":"24d25f7ff80e39c21a6b3a0ee610cb0ad7d33b64aa1c8f12399598f305447f71"} Jan 28 17:07:49 crc kubenswrapper[4877]: I0128 17:07:49.775606 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a","Type":"ContainerDied","Data":"cd874473116d0bd1dc0097fc84f4daca1a60090731d237396af4375863469715"} Jan 28 17:07:51 crc kubenswrapper[4877]: I0128 17:07:51.803339 4877 generic.go:334] "Generic (PLEG): container finished" podID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerID="548238c267d07348d74459eda816ce0054c894cdd1e34249ff66a7e0cc8ed27b" exitCode=0 Jan 28 17:07:51 crc kubenswrapper[4877]: I0128 17:07:51.803551 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" 
event={"ID":"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a","Type":"ContainerDied","Data":"548238c267d07348d74459eda816ce0054c894cdd1e34249ff66a7e0cc8ed27b"} Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.681541 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.820338 4877 generic.go:334] "Generic (PLEG): container finished" podID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerID="1899d70b6e6172c561f0009e9985ce54d10522df458f604136ac84297362d6d6" exitCode=0 Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.820414 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a","Type":"ContainerDied","Data":"1899d70b6e6172c561f0009e9985ce54d10522df458f604136ac84297362d6d6"} Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.820447 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a","Type":"ContainerDied","Data":"1f03e3acfe26945d0e816f9f9b238eff89cc3934354280361cb801a83a558b04"} Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.820466 4877 scope.go:117] "RemoveContainer" containerID="548238c267d07348d74459eda816ce0054c894cdd1e34249ff66a7e0cc8ed27b" Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.820657 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.824552 4877 generic.go:334] "Generic (PLEG): container finished" podID="a2ce6077-c81f-45f8-beb5-99d0a4df9c40" containerID="ef2cc96ddb8d016396a2342a8aa7300f2d273d31b35d41e6e224ee9a3f28caf6" exitCode=0 Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.824588 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" event={"ID":"a2ce6077-c81f-45f8-beb5-99d0a4df9c40","Type":"ContainerDied","Data":"ef2cc96ddb8d016396a2342a8aa7300f2d273d31b35d41e6e224ee9a3f28caf6"} Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.860789 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-scripts\") pod \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.860846 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-public-tls-certs\") pod \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.860929 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mjtq5\" (UniqueName: \"kubernetes.io/projected/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-kube-api-access-mjtq5\") pod \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.860954 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-config-data\") pod \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 
17:07:52.861048 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-internal-tls-certs\") pod \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.861145 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-combined-ca-bundle\") pod \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\" (UID: \"e496d461-a1e5-48b5-a4b0-5ae61e5cb53a\") " Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.866587 4877 scope.go:117] "RemoveContainer" containerID="1899d70b6e6172c561f0009e9985ce54d10522df458f604136ac84297362d6d6" Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.873674 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-kube-api-access-mjtq5" (OuterVolumeSpecName: "kube-api-access-mjtq5") pod "e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" (UID: "e496d461-a1e5-48b5-a4b0-5ae61e5cb53a"). InnerVolumeSpecName "kube-api-access-mjtq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.881583 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-scripts" (OuterVolumeSpecName: "scripts") pod "e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" (UID: "e496d461-a1e5-48b5-a4b0-5ae61e5cb53a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.934104 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" (UID: "e496d461-a1e5-48b5-a4b0-5ae61e5cb53a"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.934551 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" (UID: "e496d461-a1e5-48b5-a4b0-5ae61e5cb53a"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.965815 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mjtq5\" (UniqueName: \"kubernetes.io/projected/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-kube-api-access-mjtq5\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.965852 4877 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.965864 4877 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:52 crc kubenswrapper[4877]: I0128 17:07:52.965872 4877 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.003844 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" (UID: "e496d461-a1e5-48b5-a4b0-5ae61e5cb53a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.068742 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.112388 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-config-data" (OuterVolumeSpecName: "config-data") pod "e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" (UID: "e496d461-a1e5-48b5-a4b0-5ae61e5cb53a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.113139 4877 scope.go:117] "RemoveContainer" containerID="24d25f7ff80e39c21a6b3a0ee610cb0ad7d33b64aa1c8f12399598f305447f71" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.149443 4877 scope.go:117] "RemoveContainer" containerID="cd874473116d0bd1dc0097fc84f4daca1a60090731d237396af4375863469715" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.170566 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.176051 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.192398 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.208813 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Jan 28 17:07:53 crc kubenswrapper[4877]: E0128 17:07:53.210115 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerName="aodh-listener" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.210167 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerName="aodh-listener" Jan 28 17:07:53 crc kubenswrapper[4877]: E0128 17:07:53.210200 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerName="aodh-notifier" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.210210 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerName="aodh-notifier" Jan 28 17:07:53 crc kubenswrapper[4877]: E0128 17:07:53.210231 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerName="aodh-api" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.210240 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerName="aodh-api" Jan 28 17:07:53 crc kubenswrapper[4877]: E0128 17:07:53.210304 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerName="aodh-evaluator" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.210313 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerName="aodh-evaluator" Jan 28 17:07:53 crc kubenswrapper[4877]: E0128 17:07:53.210330 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b6f7515-689a-4a2e-807d-dde5d82975a7" containerName="aodh-db-sync" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.210341 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b6f7515-689a-4a2e-807d-dde5d82975a7" containerName="aodh-db-sync" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.210724 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerName="aodh-listener" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.210759 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b6f7515-689a-4a2e-807d-dde5d82975a7" containerName="aodh-db-sync" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.210776 4877 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerName="aodh-notifier" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.210808 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerName="aodh-api" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.210823 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" containerName="aodh-evaluator" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.214346 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.218303 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.218564 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.219186 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.220278 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-nxphm" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.235050 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.235650 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.239964 4877 scope.go:117] "RemoveContainer" containerID="548238c267d07348d74459eda816ce0054c894cdd1e34249ff66a7e0cc8ed27b" Jan 28 17:07:53 crc kubenswrapper[4877]: E0128 17:07:53.241376 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"548238c267d07348d74459eda816ce0054c894cdd1e34249ff66a7e0cc8ed27b\": container with ID starting with 548238c267d07348d74459eda816ce0054c894cdd1e34249ff66a7e0cc8ed27b not found: ID does not exist" containerID="548238c267d07348d74459eda816ce0054c894cdd1e34249ff66a7e0cc8ed27b" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.241426 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"548238c267d07348d74459eda816ce0054c894cdd1e34249ff66a7e0cc8ed27b"} err="failed to get container status \"548238c267d07348d74459eda816ce0054c894cdd1e34249ff66a7e0cc8ed27b\": rpc error: code = NotFound desc = could not find container \"548238c267d07348d74459eda816ce0054c894cdd1e34249ff66a7e0cc8ed27b\": container with ID starting with 548238c267d07348d74459eda816ce0054c894cdd1e34249ff66a7e0cc8ed27b not found: ID does not exist" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.241559 4877 scope.go:117] "RemoveContainer" containerID="1899d70b6e6172c561f0009e9985ce54d10522df458f604136ac84297362d6d6" Jan 28 17:07:53 crc kubenswrapper[4877]: E0128 17:07:53.243024 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1899d70b6e6172c561f0009e9985ce54d10522df458f604136ac84297362d6d6\": container with ID starting with 1899d70b6e6172c561f0009e9985ce54d10522df458f604136ac84297362d6d6 not found: ID does not exist" containerID="1899d70b6e6172c561f0009e9985ce54d10522df458f604136ac84297362d6d6" Jan 28 17:07:53 crc 
kubenswrapper[4877]: I0128 17:07:53.243088 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1899d70b6e6172c561f0009e9985ce54d10522df458f604136ac84297362d6d6"} err="failed to get container status \"1899d70b6e6172c561f0009e9985ce54d10522df458f604136ac84297362d6d6\": rpc error: code = NotFound desc = could not find container \"1899d70b6e6172c561f0009e9985ce54d10522df458f604136ac84297362d6d6\": container with ID starting with 1899d70b6e6172c561f0009e9985ce54d10522df458f604136ac84297362d6d6 not found: ID does not exist" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.243133 4877 scope.go:117] "RemoveContainer" containerID="24d25f7ff80e39c21a6b3a0ee610cb0ad7d33b64aa1c8f12399598f305447f71" Jan 28 17:07:53 crc kubenswrapper[4877]: E0128 17:07:53.243549 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24d25f7ff80e39c21a6b3a0ee610cb0ad7d33b64aa1c8f12399598f305447f71\": container with ID starting with 24d25f7ff80e39c21a6b3a0ee610cb0ad7d33b64aa1c8f12399598f305447f71 not found: ID does not exist" containerID="24d25f7ff80e39c21a6b3a0ee610cb0ad7d33b64aa1c8f12399598f305447f71" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.243600 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24d25f7ff80e39c21a6b3a0ee610cb0ad7d33b64aa1c8f12399598f305447f71"} err="failed to get container status \"24d25f7ff80e39c21a6b3a0ee610cb0ad7d33b64aa1c8f12399598f305447f71\": rpc error: code = NotFound desc = could not find container \"24d25f7ff80e39c21a6b3a0ee610cb0ad7d33b64aa1c8f12399598f305447f71\": container with ID starting with 24d25f7ff80e39c21a6b3a0ee610cb0ad7d33b64aa1c8f12399598f305447f71 not found: ID does not exist" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.243635 4877 scope.go:117] "RemoveContainer" containerID="cd874473116d0bd1dc0097fc84f4daca1a60090731d237396af4375863469715" Jan 28 17:07:53 crc kubenswrapper[4877]: E0128 17:07:53.243918 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd874473116d0bd1dc0097fc84f4daca1a60090731d237396af4375863469715\": container with ID starting with cd874473116d0bd1dc0097fc84f4daca1a60090731d237396af4375863469715 not found: ID does not exist" containerID="cd874473116d0bd1dc0097fc84f4daca1a60090731d237396af4375863469715" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.243952 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd874473116d0bd1dc0097fc84f4daca1a60090731d237396af4375863469715"} err="failed to get container status \"cd874473116d0bd1dc0097fc84f4daca1a60090731d237396af4375863469715\": rpc error: code = NotFound desc = could not find container \"cd874473116d0bd1dc0097fc84f4daca1a60090731d237396af4375863469715\": container with ID starting with cd874473116d0bd1dc0097fc84f4daca1a60090731d237396af4375863469715 not found: ID does not exist" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.349368 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e496d461-a1e5-48b5-a4b0-5ae61e5cb53a" path="/var/lib/kubelet/pods/e496d461-a1e5-48b5-a4b0-5ae61e5cb53a/volumes" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.377109 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-combined-ca-bundle\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.377498 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcmxn\" (UniqueName: \"kubernetes.io/projected/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-kube-api-access-jcmxn\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.377714 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-scripts\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.377916 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-public-tls-certs\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.378289 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-config-data\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.379026 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-internal-tls-certs\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.485277 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-internal-tls-certs\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.485443 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-combined-ca-bundle\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.485565 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcmxn\" (UniqueName: \"kubernetes.io/projected/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-kube-api-access-jcmxn\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.485651 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-scripts\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.485702 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-public-tls-certs\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.485857 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-config-data\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.492599 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-internal-tls-certs\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.492884 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-combined-ca-bundle\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.492893 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-scripts\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.496315 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-public-tls-certs\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.497690 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-config-data\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.509061 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcmxn\" (UniqueName: \"kubernetes.io/projected/3e9dfe3e-469f-49a4-9956-85cc87e7a16a-kube-api-access-jcmxn\") pod \"aodh-0\" (UID: \"3e9dfe3e-469f-49a4-9956-85cc87e7a16a\") " pod="openstack/aodh-0" Jan 28 17:07:53 crc kubenswrapper[4877]: I0128 17:07:53.547357 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 28 17:07:54 crc kubenswrapper[4877]: W0128 17:07:54.058441 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e9dfe3e_469f_49a4_9956_85cc87e7a16a.slice/crio-bb88b13d1c7c40e4964fe1d6c0acb21217b24ad4574316d380ca19eac5d17a75 WatchSource:0}: Error finding container bb88b13d1c7c40e4964fe1d6c0acb21217b24ad4574316d380ca19eac5d17a75: Status 404 returned error can't find the container with id bb88b13d1c7c40e4964fe1d6c0acb21217b24ad4574316d380ca19eac5d17a75 Jan 28 17:07:54 crc kubenswrapper[4877]: I0128 17:07:54.063759 4877 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 17:07:54 crc kubenswrapper[4877]: I0128 17:07:54.075062 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 28 17:07:54 crc kubenswrapper[4877]: I0128 17:07:54.859126 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3e9dfe3e-469f-49a4-9956-85cc87e7a16a","Type":"ContainerStarted","Data":"bb88b13d1c7c40e4964fe1d6c0acb21217b24ad4574316d380ca19eac5d17a75"} Jan 28 17:07:55 crc kubenswrapper[4877]: I0128 17:07:55.896297 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3e9dfe3e-469f-49a4-9956-85cc87e7a16a","Type":"ContainerStarted","Data":"130fb6023f54414b2b8fa02126f35cc1327e4179d1e30dba9ce7609332fe6de1"} Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.132910 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.244304 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fsrk2" Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.272876 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v694g\" (UniqueName: \"kubernetes.io/projected/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-kube-api-access-v694g\") pod \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\" (UID: \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\") " Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.272928 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-ssh-key-openstack-edpm-ipam\") pod \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\" (UID: \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\") " Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.273968 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-inventory\") pod \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\" (UID: \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\") " Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.274601 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-repo-setup-combined-ca-bundle\") pod \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\" (UID: \"a2ce6077-c81f-45f8-beb5-99d0a4df9c40\") " Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.279737 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-kube-api-access-v694g" (OuterVolumeSpecName: "kube-api-access-v694g") pod "a2ce6077-c81f-45f8-beb5-99d0a4df9c40" (UID: "a2ce6077-c81f-45f8-beb5-99d0a4df9c40"). InnerVolumeSpecName "kube-api-access-v694g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.282069 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "a2ce6077-c81f-45f8-beb5-99d0a4df9c40" (UID: "a2ce6077-c81f-45f8-beb5-99d0a4df9c40"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.315045 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fsrk2" Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.315265 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-inventory" (OuterVolumeSpecName: "inventory") pod "a2ce6077-c81f-45f8-beb5-99d0a4df9c40" (UID: "a2ce6077-c81f-45f8-beb5-99d0a4df9c40"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.320305 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "a2ce6077-c81f-45f8-beb5-99d0a4df9c40" (UID: "a2ce6077-c81f-45f8-beb5-99d0a4df9c40"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.378469 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.378532 4877 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.378549 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v694g\" (UniqueName: \"kubernetes.io/projected/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-kube-api-access-v694g\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.378566 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a2ce6077-c81f-45f8-beb5-99d0a4df9c40-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.483609 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fsrk2"] Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.911104 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.911108 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-zdbmp" event={"ID":"a2ce6077-c81f-45f8-beb5-99d0a4df9c40","Type":"ContainerDied","Data":"f532892fbe8d1c570f9f81f324cbbc0dad82ad096088bec309414feae4f26855"} Jan 28 17:07:56 crc kubenswrapper[4877]: I0128 17:07:56.912265 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f532892fbe8d1c570f9f81f324cbbc0dad82ad096088bec309414feae4f26855" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.228751 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw"] Jan 28 17:07:57 crc kubenswrapper[4877]: E0128 17:07:57.229448 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2ce6077-c81f-45f8-beb5-99d0a4df9c40" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.229465 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2ce6077-c81f-45f8-beb5-99d0a4df9c40" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.229712 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2ce6077-c81f-45f8-beb5-99d0a4df9c40" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.230497 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.233002 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.233033 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.233119 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.233261 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.259143 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw"] Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.299905 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqcgh\" (UniqueName: \"kubernetes.io/projected/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-kube-api-access-kqcgh\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rp7tw\" (UID: \"6fa211a9-eeb8-4c14-9017-ca3ff16981f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.300092 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rp7tw\" (UID: \"6fa211a9-eeb8-4c14-9017-ca3ff16981f7\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.300216 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rp7tw\" (UID: \"6fa211a9-eeb8-4c14-9017-ca3ff16981f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.402297 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rp7tw\" (UID: \"6fa211a9-eeb8-4c14-9017-ca3ff16981f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.402503 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rp7tw\" (UID: \"6fa211a9-eeb8-4c14-9017-ca3ff16981f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.402678 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqcgh\" (UniqueName: \"kubernetes.io/projected/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-kube-api-access-kqcgh\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rp7tw\" (UID: \"6fa211a9-eeb8-4c14-9017-ca3ff16981f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.407357 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.408506 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.422266 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rp7tw\" (UID: \"6fa211a9-eeb8-4c14-9017-ca3ff16981f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.422401 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqcgh\" (UniqueName: \"kubernetes.io/projected/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-kube-api-access-kqcgh\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rp7tw\" (UID: \"6fa211a9-eeb8-4c14-9017-ca3ff16981f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.424572 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rp7tw\" (UID: \"6fa211a9-eeb8-4c14-9017-ca3ff16981f7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.560425 4877 reflector.go:368] 
Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.568018 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.991557 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fsrk2" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="registry-server" containerID="cri-o://cc45064933bfd058266db3abcefe4acc0adb5585f3df85d7a0231bf3f414acb4" gracePeriod=2 Jan 28 17:07:57 crc kubenswrapper[4877]: I0128 17:07:57.992767 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3e9dfe3e-469f-49a4-9956-85cc87e7a16a","Type":"ContainerStarted","Data":"68aef30f3003cc1d2a13249d60155942ae3166a9cc91b8c4127a322a1326387e"} Jan 28 17:07:58 crc kubenswrapper[4877]: I0128 17:07:58.478306 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw"] Jan 28 17:07:58 crc kubenswrapper[4877]: I0128 17:07:58.754738 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fsrk2" Jan 28 17:07:58 crc kubenswrapper[4877]: I0128 17:07:58.863063 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sskq9\" (UniqueName: \"kubernetes.io/projected/e5954741-e854-495a-9122-509fcfa1ec6c-kube-api-access-sskq9\") pod \"e5954741-e854-495a-9122-509fcfa1ec6c\" (UID: \"e5954741-e854-495a-9122-509fcfa1ec6c\") " Jan 28 17:07:58 crc kubenswrapper[4877]: I0128 17:07:58.863891 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5954741-e854-495a-9122-509fcfa1ec6c-utilities\") pod \"e5954741-e854-495a-9122-509fcfa1ec6c\" (UID: \"e5954741-e854-495a-9122-509fcfa1ec6c\") " Jan 28 17:07:58 crc kubenswrapper[4877]: I0128 17:07:58.864170 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5954741-e854-495a-9122-509fcfa1ec6c-catalog-content\") pod \"e5954741-e854-495a-9122-509fcfa1ec6c\" (UID: \"e5954741-e854-495a-9122-509fcfa1ec6c\") " Jan 28 17:07:58 crc kubenswrapper[4877]: I0128 17:07:58.865087 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5954741-e854-495a-9122-509fcfa1ec6c-utilities" (OuterVolumeSpecName: "utilities") pod "e5954741-e854-495a-9122-509fcfa1ec6c" (UID: "e5954741-e854-495a-9122-509fcfa1ec6c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:07:58 crc kubenswrapper[4877]: I0128 17:07:58.865597 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5954741-e854-495a-9122-509fcfa1ec6c-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:58 crc kubenswrapper[4877]: I0128 17:07:58.868684 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5954741-e854-495a-9122-509fcfa1ec6c-kube-api-access-sskq9" (OuterVolumeSpecName: "kube-api-access-sskq9") pod "e5954741-e854-495a-9122-509fcfa1ec6c" (UID: "e5954741-e854-495a-9122-509fcfa1ec6c"). InnerVolumeSpecName "kube-api-access-sskq9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:07:58 crc kubenswrapper[4877]: I0128 17:07:58.968731 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sskq9\" (UniqueName: \"kubernetes.io/projected/e5954741-e854-495a-9122-509fcfa1ec6c-kube-api-access-sskq9\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.007195 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" event={"ID":"6fa211a9-eeb8-4c14-9017-ca3ff16981f7","Type":"ContainerStarted","Data":"e914ecad4034eb8ca3a340fb159a4716b4784419d265d460a2a8c998865b16c9"} Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.007988 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5954741-e854-495a-9122-509fcfa1ec6c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e5954741-e854-495a-9122-509fcfa1ec6c" (UID: "e5954741-e854-495a-9122-509fcfa1ec6c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.010678 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3e9dfe3e-469f-49a4-9956-85cc87e7a16a","Type":"ContainerStarted","Data":"acd3022274928bcef2cc5e121f9938680caeca1f199a7764b150e1cbb38c8a96"} Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.013791 4877 generic.go:334] "Generic (PLEG): container finished" podID="e5954741-e854-495a-9122-509fcfa1ec6c" containerID="cc45064933bfd058266db3abcefe4acc0adb5585f3df85d7a0231bf3f414acb4" exitCode=0 Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.013829 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fsrk2" event={"ID":"e5954741-e854-495a-9122-509fcfa1ec6c","Type":"ContainerDied","Data":"cc45064933bfd058266db3abcefe4acc0adb5585f3df85d7a0231bf3f414acb4"} Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.013858 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fsrk2" event={"ID":"e5954741-e854-495a-9122-509fcfa1ec6c","Type":"ContainerDied","Data":"4a587856150e6046100be0b70885ba76216cea65928c0ff68b42fa2bf2fe0d76"} Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.013872 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fsrk2" Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.013879 4877 scope.go:117] "RemoveContainer" containerID="cc45064933bfd058266db3abcefe4acc0adb5585f3df85d7a0231bf3f414acb4" Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.058919 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fsrk2"] Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.070859 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fsrk2"] Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.071285 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5954741-e854-495a-9122-509fcfa1ec6c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.226260 4877 scope.go:117] "RemoveContainer" containerID="c1fabd10a246823b9ac3c710827b5ee5a99d881960131e15933f7dee66569882" Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.255733 4877 scope.go:117] "RemoveContainer" containerID="7f378fdbd5bd05475089a1f397195fa927db19a5c9e1a622be1d17dd39bcb690" Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.348086 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" path="/var/lib/kubelet/pods/e5954741-e854-495a-9122-509fcfa1ec6c/volumes" Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.439213 4877 scope.go:117] "RemoveContainer" containerID="cc45064933bfd058266db3abcefe4acc0adb5585f3df85d7a0231bf3f414acb4" Jan 28 17:07:59 crc kubenswrapper[4877]: E0128 17:07:59.440064 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc45064933bfd058266db3abcefe4acc0adb5585f3df85d7a0231bf3f414acb4\": container with ID starting with cc45064933bfd058266db3abcefe4acc0adb5585f3df85d7a0231bf3f414acb4 not found: ID does not exist" containerID="cc45064933bfd058266db3abcefe4acc0adb5585f3df85d7a0231bf3f414acb4" Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.440106 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc45064933bfd058266db3abcefe4acc0adb5585f3df85d7a0231bf3f414acb4"} err="failed to get container status \"cc45064933bfd058266db3abcefe4acc0adb5585f3df85d7a0231bf3f414acb4\": rpc error: code = NotFound desc = could not find container \"cc45064933bfd058266db3abcefe4acc0adb5585f3df85d7a0231bf3f414acb4\": container with ID starting with cc45064933bfd058266db3abcefe4acc0adb5585f3df85d7a0231bf3f414acb4 not found: ID does not exist" Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.440135 4877 scope.go:117] "RemoveContainer" containerID="c1fabd10a246823b9ac3c710827b5ee5a99d881960131e15933f7dee66569882" Jan 28 17:07:59 crc kubenswrapper[4877]: E0128 17:07:59.440628 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1fabd10a246823b9ac3c710827b5ee5a99d881960131e15933f7dee66569882\": container with ID starting with c1fabd10a246823b9ac3c710827b5ee5a99d881960131e15933f7dee66569882 not found: ID does not exist" containerID="c1fabd10a246823b9ac3c710827b5ee5a99d881960131e15933f7dee66569882" Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.440661 4877 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"c1fabd10a246823b9ac3c710827b5ee5a99d881960131e15933f7dee66569882"} err="failed to get container status \"c1fabd10a246823b9ac3c710827b5ee5a99d881960131e15933f7dee66569882\": rpc error: code = NotFound desc = could not find container \"c1fabd10a246823b9ac3c710827b5ee5a99d881960131e15933f7dee66569882\": container with ID starting with c1fabd10a246823b9ac3c710827b5ee5a99d881960131e15933f7dee66569882 not found: ID does not exist" Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.440687 4877 scope.go:117] "RemoveContainer" containerID="7f378fdbd5bd05475089a1f397195fa927db19a5c9e1a622be1d17dd39bcb690" Jan 28 17:07:59 crc kubenswrapper[4877]: E0128 17:07:59.441038 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f378fdbd5bd05475089a1f397195fa927db19a5c9e1a622be1d17dd39bcb690\": container with ID starting with 7f378fdbd5bd05475089a1f397195fa927db19a5c9e1a622be1d17dd39bcb690 not found: ID does not exist" containerID="7f378fdbd5bd05475089a1f397195fa927db19a5c9e1a622be1d17dd39bcb690" Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.441065 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f378fdbd5bd05475089a1f397195fa927db19a5c9e1a622be1d17dd39bcb690"} err="failed to get container status \"7f378fdbd5bd05475089a1f397195fa927db19a5c9e1a622be1d17dd39bcb690\": rpc error: code = NotFound desc = could not find container \"7f378fdbd5bd05475089a1f397195fa927db19a5c9e1a622be1d17dd39bcb690\": container with ID starting with 7f378fdbd5bd05475089a1f397195fa927db19a5c9e1a622be1d17dd39bcb690 not found: ID does not exist" Jan 28 17:07:59 crc kubenswrapper[4877]: I0128 17:07:59.471808 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:08:00 crc kubenswrapper[4877]: I0128 17:08:00.028577 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" event={"ID":"6fa211a9-eeb8-4c14-9017-ca3ff16981f7","Type":"ContainerStarted","Data":"ccff3fa65add3cc5f9905b94a11764f8c93907c84443351cd137dc8586676804"} Jan 28 17:08:00 crc kubenswrapper[4877]: I0128 17:08:00.033825 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3e9dfe3e-469f-49a4-9956-85cc87e7a16a","Type":"ContainerStarted","Data":"89a02bc6bd79c4ffbf0a5e42b313e25768b48fc748bd8665c03bbf9c4882bb23"} Jan 28 17:08:00 crc kubenswrapper[4877]: I0128 17:08:00.055215 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" podStartSLOduration=2.084117486 podStartE2EDuration="3.055194016s" podCreationTimestamp="2026-01-28 17:07:57 +0000 UTC" firstStartedPulling="2026-01-28 17:07:58.49636112 +0000 UTC m=+1982.054688008" lastFinishedPulling="2026-01-28 17:07:59.46743765 +0000 UTC m=+1983.025764538" observedRunningTime="2026-01-28 17:08:00.046086851 +0000 UTC m=+1983.604413739" watchObservedRunningTime="2026-01-28 17:08:00.055194016 +0000 UTC m=+1983.613520904" Jan 28 17:08:00 crc kubenswrapper[4877]: I0128 17:08:00.071644 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=1.667819496 podStartE2EDuration="7.071621208s" podCreationTimestamp="2026-01-28 17:07:53 +0000 UTC" firstStartedPulling="2026-01-28 17:07:54.063364859 +0000 UTC m=+1977.621691747" lastFinishedPulling="2026-01-28 17:07:59.467166571 
+0000 UTC m=+1983.025493459" observedRunningTime="2026-01-28 17:08:00.065931685 +0000 UTC m=+1983.624258573" watchObservedRunningTime="2026-01-28 17:08:00.071621208 +0000 UTC m=+1983.629948096" Jan 28 17:08:03 crc kubenswrapper[4877]: I0128 17:08:03.054570 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-7395-account-create-update-2gfnc"] Jan 28 17:08:03 crc kubenswrapper[4877]: I0128 17:08:03.069664 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-6m8l9"] Jan 28 17:08:03 crc kubenswrapper[4877]: I0128 17:08:03.085610 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-6m8l9"] Jan 28 17:08:03 crc kubenswrapper[4877]: I0128 17:08:03.099518 4877 generic.go:334] "Generic (PLEG): container finished" podID="6fa211a9-eeb8-4c14-9017-ca3ff16981f7" containerID="ccff3fa65add3cc5f9905b94a11764f8c93907c84443351cd137dc8586676804" exitCode=0 Jan 28 17:08:03 crc kubenswrapper[4877]: I0128 17:08:03.099751 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" event={"ID":"6fa211a9-eeb8-4c14-9017-ca3ff16981f7","Type":"ContainerDied","Data":"ccff3fa65add3cc5f9905b94a11764f8c93907c84443351cd137dc8586676804"} Jan 28 17:08:03 crc kubenswrapper[4877]: I0128 17:08:03.100436 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-7395-account-create-update-2gfnc"] Jan 28 17:08:03 crc kubenswrapper[4877]: I0128 17:08:03.346747 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39b89193-b00c-41ea-bf92-cc3aebce6a31" path="/var/lib/kubelet/pods/39b89193-b00c-41ea-bf92-cc3aebce6a31/volumes" Jan 28 17:08:03 crc kubenswrapper[4877]: I0128 17:08:03.350500 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0f6fbde-4240-4911-9b0d-50740836e659" path="/var/lib/kubelet/pods/b0f6fbde-4240-4911-9b0d-50740836e659/volumes" Jan 28 17:08:04 crc kubenswrapper[4877]: I0128 17:08:04.811456 4877 util.go:48] "No ready sandbox for pod can be found. 
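
Note on the pod_startup_latency_tracker lines above: they encode a fixed relationship. podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling). The aodh-0 entry reproduces exactly; below is a minimal Go check with the timestamps copied verbatim from the log. The pairing with watchObservedRunningTime is inferred from the numbers here, not from kubelet source.

    package main

    import (
        "fmt"
        "time"
    )

    func mustParse(s string) time.Time {
        // Layout for Go's default time.Time rendering, which is what the log prints.
        t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
        if err != nil {
            panic(err)
        }
        return t
    }

    func main() {
        // Timestamps copied from the openstack/aodh-0 entry above.
        created := mustParse("2026-01-28 17:07:53 +0000 UTC")             // podCreationTimestamp
        firstPull := mustParse("2026-01-28 17:07:54.063364859 +0000 UTC") // firstStartedPulling
        lastPull := mustParse("2026-01-28 17:07:59.467166571 +0000 UTC")  // lastFinishedPulling
        running := mustParse("2026-01-28 17:08:00.071621208 +0000 UTC")   // watchObservedRunningTime

        e2e := running.Sub(created)          // podStartE2EDuration
        slo := e2e - lastPull.Sub(firstPull) // podStartSLOduration: e2e minus the pull window
        fmt.Println(e2e, slo)                // 7.071621208s 1.667819496s, matching the log
    }

The redhat-edpm-deployment entry satisfies the same identity (3.055194016s e2e, 2.084117486s SLO).
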
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" Jan 28 17:08:04 crc kubenswrapper[4877]: I0128 17:08:04.930185 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqcgh\" (UniqueName: \"kubernetes.io/projected/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-kube-api-access-kqcgh\") pod \"6fa211a9-eeb8-4c14-9017-ca3ff16981f7\" (UID: \"6fa211a9-eeb8-4c14-9017-ca3ff16981f7\") " Jan 28 17:08:04 crc kubenswrapper[4877]: I0128 17:08:04.930377 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-ssh-key-openstack-edpm-ipam\") pod \"6fa211a9-eeb8-4c14-9017-ca3ff16981f7\" (UID: \"6fa211a9-eeb8-4c14-9017-ca3ff16981f7\") " Jan 28 17:08:04 crc kubenswrapper[4877]: I0128 17:08:04.930545 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-inventory\") pod \"6fa211a9-eeb8-4c14-9017-ca3ff16981f7\" (UID: \"6fa211a9-eeb8-4c14-9017-ca3ff16981f7\") " Jan 28 17:08:04 crc kubenswrapper[4877]: I0128 17:08:04.935851 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-kube-api-access-kqcgh" (OuterVolumeSpecName: "kube-api-access-kqcgh") pod "6fa211a9-eeb8-4c14-9017-ca3ff16981f7" (UID: "6fa211a9-eeb8-4c14-9017-ca3ff16981f7"). InnerVolumeSpecName "kube-api-access-kqcgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:08:04 crc kubenswrapper[4877]: I0128 17:08:04.963812 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "6fa211a9-eeb8-4c14-9017-ca3ff16981f7" (UID: "6fa211a9-eeb8-4c14-9017-ca3ff16981f7"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:08:04 crc kubenswrapper[4877]: I0128 17:08:04.970270 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-inventory" (OuterVolumeSpecName: "inventory") pod "6fa211a9-eeb8-4c14-9017-ca3ff16981f7" (UID: "6fa211a9-eeb8-4c14-9017-ca3ff16981f7"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.034022 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.034080 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.034108 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqcgh\" (UniqueName: \"kubernetes.io/projected/6fa211a9-eeb8-4c14-9017-ca3ff16981f7-kube-api-access-kqcgh\") on node \"crc\" DevicePath \"\"" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.124243 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" event={"ID":"6fa211a9-eeb8-4c14-9017-ca3ff16981f7","Type":"ContainerDied","Data":"e914ecad4034eb8ca3a340fb159a4716b4784419d265d460a2a8c998865b16c9"} Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.124288 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rp7tw" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.124298 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e914ecad4034eb8ca3a340fb159a4716b4784419d265d460a2a8c998865b16c9" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.269598 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf"] Jan 28 17:08:05 crc kubenswrapper[4877]: E0128 17:08:05.271915 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="registry-server" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.271954 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="registry-server" Jan 28 17:08:05 crc kubenswrapper[4877]: E0128 17:08:05.272028 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fa211a9-eeb8-4c14-9017-ca3ff16981f7" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.272042 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fa211a9-eeb8-4c14-9017-ca3ff16981f7" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 28 17:08:05 crc kubenswrapper[4877]: E0128 17:08:05.272060 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="extract-utilities" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.272070 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="extract-utilities" Jan 28 17:08:05 crc kubenswrapper[4877]: E0128 17:08:05.272160 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="extract-content" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.272174 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="extract-content" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.272821 4877 
memory_manager.go:354] "RemoveStaleState removing state" podUID="e5954741-e854-495a-9122-509fcfa1ec6c" containerName="registry-server" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.272905 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fa211a9-eeb8-4c14-9017-ca3ff16981f7" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.280835 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.290772 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.297597 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.298311 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.301797 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.323734 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf"] Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.347403 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf\" (UID: \"1c676b41-42b6-44a8-92b5-ee8f883e2793\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.347523 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf\" (UID: \"1c676b41-42b6-44a8-92b5-ee8f883e2793\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.347610 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf\" (UID: \"1c676b41-42b6-44a8-92b5-ee8f883e2793\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.347684 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6t4qj\" (UniqueName: \"kubernetes.io/projected/1c676b41-42b6-44a8-92b5-ee8f883e2793-kube-api-access-6t4qj\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf\" (UID: \"1c676b41-42b6-44a8-92b5-ee8f883e2793\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.450413 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf\" (UID: \"1c676b41-42b6-44a8-92b5-ee8f883e2793\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.450535 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf\" (UID: \"1c676b41-42b6-44a8-92b5-ee8f883e2793\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.451123 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf\" (UID: \"1c676b41-42b6-44a8-92b5-ee8f883e2793\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.451277 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6t4qj\" (UniqueName: \"kubernetes.io/projected/1c676b41-42b6-44a8-92b5-ee8f883e2793-kube-api-access-6t4qj\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf\" (UID: \"1c676b41-42b6-44a8-92b5-ee8f883e2793\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.454261 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf\" (UID: \"1c676b41-42b6-44a8-92b5-ee8f883e2793\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.454453 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf\" (UID: \"1c676b41-42b6-44a8-92b5-ee8f883e2793\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.456086 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf\" (UID: \"1c676b41-42b6-44a8-92b5-ee8f883e2793\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.469812 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6t4qj\" (UniqueName: \"kubernetes.io/projected/1c676b41-42b6-44a8-92b5-ee8f883e2793-kube-api-access-6t4qj\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf\" (UID: \"1c676b41-42b6-44a8-92b5-ee8f883e2793\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" Jan 28 17:08:05 crc kubenswrapper[4877]: I0128 17:08:05.618160 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" Jan 28 17:08:06 crc kubenswrapper[4877]: I0128 17:08:06.192548 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf"] Jan 28 17:08:07 crc kubenswrapper[4877]: I0128 17:08:07.149907 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" event={"ID":"1c676b41-42b6-44a8-92b5-ee8f883e2793","Type":"ContainerStarted","Data":"41699e90fc11b99784b887805a4624cbff8f8c1924054515b3fe2fadc209f971"} Jan 28 17:08:07 crc kubenswrapper[4877]: I0128 17:08:07.150241 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" event={"ID":"1c676b41-42b6-44a8-92b5-ee8f883e2793","Type":"ContainerStarted","Data":"0f6a5ae339ea31001a622f2e480d28c3a5d6531c84a40271eee6ffc7302fd37c"} Jan 28 17:08:07 crc kubenswrapper[4877]: I0128 17:08:07.182641 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" podStartSLOduration=1.569937376 podStartE2EDuration="2.182621363s" podCreationTimestamp="2026-01-28 17:08:05 +0000 UTC" firstStartedPulling="2026-01-28 17:08:06.196935321 +0000 UTC m=+1989.755262209" lastFinishedPulling="2026-01-28 17:08:06.809619308 +0000 UTC m=+1990.367946196" observedRunningTime="2026-01-28 17:08:07.166825968 +0000 UTC m=+1990.725152856" watchObservedRunningTime="2026-01-28 17:08:07.182621363 +0000 UTC m=+1990.740948251" Jan 28 17:08:08 crc kubenswrapper[4877]: I0128 17:08:08.161720 4877 generic.go:334] "Generic (PLEG): container finished" podID="ffc790a6-8bf2-4088-8ec5-1720988944ae" containerID="8e7fd88207cf2fc03e843580a2bc45136018689faa935ef818f3426d8a1618e8" exitCode=0 Jan 28 17:08:08 crc kubenswrapper[4877]: I0128 17:08:08.161806 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"ffc790a6-8bf2-4088-8ec5-1720988944ae","Type":"ContainerDied","Data":"8e7fd88207cf2fc03e843580a2bc45136018689faa935ef818f3426d8a1618e8"} Jan 28 17:08:09 crc kubenswrapper[4877]: I0128 17:08:09.185753 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-1" event={"ID":"ffc790a6-8bf2-4088-8ec5-1720988944ae","Type":"ContainerStarted","Data":"60042463369dc890cff5e9687ac1e3e1a9ee386a55ffa0f91b02ddc0fa328d75"} Jan 28 17:08:09 crc kubenswrapper[4877]: I0128 17:08:09.186386 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-1" Jan 28 17:08:09 crc kubenswrapper[4877]: I0128 17:08:09.223106 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-1" podStartSLOduration=37.223085998 podStartE2EDuration="37.223085998s" podCreationTimestamp="2026-01-28 17:07:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:08:09.212237146 +0000 UTC m=+1992.770564044" watchObservedRunningTime="2026-01-28 17:08:09.223085998 +0000 UTC m=+1992.781412896" Jan 28 17:08:22 crc kubenswrapper[4877]: I0128 17:08:22.991689 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-1" Jan 28 17:08:23 crc kubenswrapper[4877]: I0128 17:08:23.045068 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 17:08:23 crc 
kubenswrapper[4877]: I0128 17:08:23.071500 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-95w8q"] Jan 28 17:08:23 crc kubenswrapper[4877]: I0128 17:08:23.200090 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-hjfls"] Jan 28 17:08:23 crc kubenswrapper[4877]: I0128 17:08:23.239626 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-dhv7z"] Jan 28 17:08:23 crc kubenswrapper[4877]: I0128 17:08:23.271877 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-95w8q"] Jan 28 17:08:23 crc kubenswrapper[4877]: I0128 17:08:23.290664 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-hjfls"] Jan 28 17:08:23 crc kubenswrapper[4877]: I0128 17:08:23.312240 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-p94jx"] Jan 28 17:08:23 crc kubenswrapper[4877]: I0128 17:08:23.324280 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-dhv7z"] Jan 28 17:08:23 crc kubenswrapper[4877]: I0128 17:08:23.433330 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="172466e0-ca09-497b-8356-3099ad380f3a" path="/var/lib/kubelet/pods/172466e0-ca09-497b-8356-3099ad380f3a/volumes" Jan 28 17:08:23 crc kubenswrapper[4877]: I0128 17:08:23.436454 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2117f641-020b-4fbc-b813-e56dab47f1c6" path="/var/lib/kubelet/pods/2117f641-020b-4fbc-b813-e56dab47f1c6/volumes" Jan 28 17:08:23 crc kubenswrapper[4877]: I0128 17:08:23.437234 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed43d723-2ac6-40d0-aaea-148be1ceb3a4" path="/var/lib/kubelet/pods/ed43d723-2ac6-40d0-aaea-148be1ceb3a4/volumes" Jan 28 17:08:23 crc kubenswrapper[4877]: I0128 17:08:23.438389 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-p94jx"] Jan 28 17:08:23 crc kubenswrapper[4877]: I0128 17:08:23.438422 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-eeda-account-create-update-pmwp8"] Jan 28 17:08:23 crc kubenswrapper[4877]: I0128 17:08:23.438435 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-eeda-account-create-update-pmwp8"] Jan 28 17:08:24 crc kubenswrapper[4877]: I0128 17:08:24.188461 4877 scope.go:117] "RemoveContainer" containerID="ff675c788d25e2dc6a8ff30a48039ea321025b0eaf0bff5337f4a4e3dd5732eb" Jan 28 17:08:24 crc kubenswrapper[4877]: I0128 17:08:24.307870 4877 scope.go:117] "RemoveContainer" containerID="bebab129e5af01b9219c90b9acda44835cbf55e99559ebd59ee7984c427a9eff" Jan 28 17:08:24 crc kubenswrapper[4877]: I0128 17:08:24.333204 4877 scope.go:117] "RemoveContainer" containerID="5133cbf756c0fbe72c5dedf2ead3cc26813b5eae707482cff2fd43ff854f3127" Jan 28 17:08:24 crc kubenswrapper[4877]: I0128 17:08:24.440765 4877 scope.go:117] "RemoveContainer" containerID="21288ee2ae5e7d109aad960e485ec91fc7cc722aafb84c45a449320cd2592c1f" Jan 28 17:08:24 crc kubenswrapper[4877]: I0128 17:08:24.511619 4877 scope.go:117] "RemoveContainer" containerID="f42cee324b23a633e0a4278f7eafd8b837b1d3c804e14fb895f80a2336cd45b6" Jan 28 17:08:24 crc kubenswrapper[4877]: I0128 17:08:24.661714 4877 scope.go:117] "RemoveContainer" containerID="004c071c8c4b36125f906dc9bbb5e9889e65a4a289c9102dc9997c3a2273345d" Jan 28 17:08:24 crc kubenswrapper[4877]: I0128 
17:08:24.708732 4877 scope.go:117] "RemoveContainer" containerID="f1e2fb3d963a5c5f30e269a9463e7deea2cfead9bf664200ccf82d653420eada" Jan 28 17:08:24 crc kubenswrapper[4877]: I0128 17:08:24.734949 4877 scope.go:117] "RemoveContainer" containerID="ce4ec7bcf353c45f6950bb0d9ef9f7033b93e11f9430a850d977be89392f4b7f" Jan 28 17:08:25 crc kubenswrapper[4877]: I0128 17:08:25.160758 4877 scope.go:117] "RemoveContainer" containerID="ab66e67e6a1d2d4a3369839f697f1625ae2f81705ddded572cba9f6dbdb3b757" Jan 28 17:08:25 crc kubenswrapper[4877]: I0128 17:08:25.209625 4877 scope.go:117] "RemoveContainer" containerID="fbd861627eeb60a09876529ea4a64e3b217414fc001ef4e87aa4dc127ce7425c" Jan 28 17:08:25 crc kubenswrapper[4877]: I0128 17:08:25.344995 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7997d91-de54-4f22-aa26-34db8d4d0c48" path="/var/lib/kubelet/pods/b7997d91-de54-4f22-aa26-34db8d4d0c48/volumes" Jan 28 17:08:25 crc kubenswrapper[4877]: I0128 17:08:25.345814 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70e089-7495-454b-8f44-eddda03fd848" path="/var/lib/kubelet/pods/cd70e089-7495-454b-8f44-eddda03fd848/volumes" Jan 28 17:08:25 crc kubenswrapper[4877]: I0128 17:08:25.522426 4877 scope.go:117] "RemoveContainer" containerID="836af63dae82f19a79347cb78d9d64884f4f01e0da3e2c54ccc7a2a4ed4bd753" Jan 28 17:08:25 crc kubenswrapper[4877]: I0128 17:08:25.607014 4877 scope.go:117] "RemoveContainer" containerID="45d34b4d7c70dddd2467b9e38385da961dad5b64f625d55daf6fd4518f74ce84" Jan 28 17:08:28 crc kubenswrapper[4877]: I0128 17:08:28.045517 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-20ab-account-create-update-j946m"] Jan 28 17:08:28 crc kubenswrapper[4877]: I0128 17:08:28.061070 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-20ab-account-create-update-j946m"] Jan 28 17:08:28 crc kubenswrapper[4877]: I0128 17:08:28.223738 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="951f6a86-2dbc-402b-bb10-9a16d347c697" containerName="rabbitmq" containerID="cri-o://8ef2c662e9cfafd69c72e1ddd8c5e00cc88fd087a6b6090f6f2bd73fa71f67ce" gracePeriod=604795 Jan 28 17:08:29 crc kubenswrapper[4877]: I0128 17:08:29.033438 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-adb0-account-create-update-rgtpf"] Jan 28 17:08:29 crc kubenswrapper[4877]: I0128 17:08:29.047192 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-ab30-account-create-update-k22df"] Jan 28 17:08:29 crc kubenswrapper[4877]: I0128 17:08:29.058962 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-ab30-account-create-update-k22df"] Jan 28 17:08:29 crc kubenswrapper[4877]: I0128 17:08:29.069595 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-adb0-account-create-update-rgtpf"] Jan 28 17:08:29 crc kubenswrapper[4877]: I0128 17:08:29.348924 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49096c30-9fbe-45ef-8cb5-1808efde086b" path="/var/lib/kubelet/pods/49096c30-9fbe-45ef-8cb5-1808efde086b/volumes" Jan 28 17:08:29 crc kubenswrapper[4877]: I0128 17:08:29.350313 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9895a837-fc16-4072-9fb8-9b79cb56b53b" path="/var/lib/kubelet/pods/9895a837-fc16-4072-9fb8-9b79cb56b53b/volumes" Jan 28 17:08:29 crc kubenswrapper[4877]: I0128 17:08:29.351107 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="b7450209-21cd-4ad0-b700-080ff83306e1" path="/var/lib/kubelet/pods/b7450209-21cd-4ad0-b700-080ff83306e1/volumes" Jan 28 17:08:31 crc kubenswrapper[4877]: I0128 17:08:31.087102 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="951f6a86-2dbc-402b-bb10-9a16d347c697" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.128:5671: connect: connection refused" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.523047 4877 generic.go:334] "Generic (PLEG): container finished" podID="951f6a86-2dbc-402b-bb10-9a16d347c697" containerID="8ef2c662e9cfafd69c72e1ddd8c5e00cc88fd087a6b6090f6f2bd73fa71f67ce" exitCode=0 Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.523158 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"951f6a86-2dbc-402b-bb10-9a16d347c697","Type":"ContainerDied","Data":"8ef2c662e9cfafd69c72e1ddd8c5e00cc88fd087a6b6090f6f2bd73fa71f67ce"} Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.523664 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"951f6a86-2dbc-402b-bb10-9a16d347c697","Type":"ContainerDied","Data":"24ce48cff87d13c1dcf823f413b0ce90d32a1ae7cf3e57ec845c45bc1c4a8f25"} Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.523686 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="24ce48cff87d13c1dcf823f413b0ce90d32a1ae7cf3e57ec845c45bc1c4a8f25" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.619711 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.762180 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-confd\") pod \"951f6a86-2dbc-402b-bb10-9a16d347c697\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.771696 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\") pod \"951f6a86-2dbc-402b-bb10-9a16d347c697\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.771969 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-erlang-cookie\") pod \"951f6a86-2dbc-402b-bb10-9a16d347c697\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.772165 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/951f6a86-2dbc-402b-bb10-9a16d347c697-pod-info\") pod \"951f6a86-2dbc-402b-bb10-9a16d347c697\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.772348 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/951f6a86-2dbc-402b-bb10-9a16d347c697-erlang-cookie-secret\") pod \"951f6a86-2dbc-402b-bb10-9a16d347c697\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 
17:08:36.772461 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-plugins-conf\") pod \"951f6a86-2dbc-402b-bb10-9a16d347c697\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.772610 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-config-data\") pod \"951f6a86-2dbc-402b-bb10-9a16d347c697\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.772761 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbpmr\" (UniqueName: \"kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-kube-api-access-bbpmr\") pod \"951f6a86-2dbc-402b-bb10-9a16d347c697\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.772846 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-plugins\") pod \"951f6a86-2dbc-402b-bb10-9a16d347c697\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.773010 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-server-conf\") pod \"951f6a86-2dbc-402b-bb10-9a16d347c697\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.773196 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-tls\") pod \"951f6a86-2dbc-402b-bb10-9a16d347c697\" (UID: \"951f6a86-2dbc-402b-bb10-9a16d347c697\") " Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.773802 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "951f6a86-2dbc-402b-bb10-9a16d347c697" (UID: "951f6a86-2dbc-402b-bb10-9a16d347c697"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.774458 4877 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.781227 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "951f6a86-2dbc-402b-bb10-9a16d347c697" (UID: "951f6a86-2dbc-402b-bb10-9a16d347c697"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
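
Every volume in the rabbitmq-server-0 teardown above is keyed by a UniqueName. In-tree plugins use kubernetes.io/<kind>/<podUID>-<name>, while the CSI persistence volume uses <driver>^<volume handle> (kubevirt.io.hostpath-provisioner^pvc-6abebcd2-...). A best-effort splitter inferred from the strings in this log rather than from kubelet's own parser:

    package main

    import (
        "fmt"
        "strings"
    )

    // splitUniqueName separates a reconciler UniqueName into plugin and
    // volume-specific parts, following the two shapes seen in this log.
    func splitUniqueName(u string) (plugin, rest string) {
        if i := strings.Index(u, "^"); i >= 0 { // CSI: plugin^volumeHandle
            return u[:i], u[i+1:]
        }
        parts := strings.SplitN(u, "/", 3) // in-tree: kubernetes.io/<kind>/<rest>
        if len(parts) == 3 {
            return parts[0] + "/" + parts[1], parts[2]
        }
        return u, ""
    }

    func main() {
        for _, u := range []string{
            "kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-confd",
            "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6",
        } {
            plugin, rest := splitUniqueName(u)
            fmt.Printf("plugin=%s rest=%s\n", plugin, rest)
        }
    }
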
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.785459 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "951f6a86-2dbc-402b-bb10-9a16d347c697" (UID: "951f6a86-2dbc-402b-bb10-9a16d347c697"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.793722 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/951f6a86-2dbc-402b-bb10-9a16d347c697-pod-info" (OuterVolumeSpecName: "pod-info") pod "951f6a86-2dbc-402b-bb10-9a16d347c697" (UID: "951f6a86-2dbc-402b-bb10-9a16d347c697"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.819705 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/951f6a86-2dbc-402b-bb10-9a16d347c697-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "951f6a86-2dbc-402b-bb10-9a16d347c697" (UID: "951f6a86-2dbc-402b-bb10-9a16d347c697"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.819912 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "951f6a86-2dbc-402b-bb10-9a16d347c697" (UID: "951f6a86-2dbc-402b-bb10-9a16d347c697"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.819992 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-kube-api-access-bbpmr" (OuterVolumeSpecName: "kube-api-access-bbpmr") pod "951f6a86-2dbc-402b-bb10-9a16d347c697" (UID: "951f6a86-2dbc-402b-bb10-9a16d347c697"). InnerVolumeSpecName "kube-api-access-bbpmr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.864456 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-config-data" (OuterVolumeSpecName: "config-data") pod "951f6a86-2dbc-402b-bb10-9a16d347c697" (UID: "951f6a86-2dbc-402b-bb10-9a16d347c697"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.878358 4877 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/951f6a86-2dbc-402b-bb10-9a16d347c697-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.878388 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.878399 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbpmr\" (UniqueName: \"kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-kube-api-access-bbpmr\") on node \"crc\" DevicePath \"\"" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.878410 4877 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.878420 4877 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.878430 4877 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.878438 4877 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/951f6a86-2dbc-402b-bb10-9a16d347c697-pod-info\") on node \"crc\" DevicePath \"\"" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.944466 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-server-conf" (OuterVolumeSpecName: "server-conf") pod "951f6a86-2dbc-402b-bb10-9a16d347c697" (UID: "951f6a86-2dbc-402b-bb10-9a16d347c697"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:08:36 crc kubenswrapper[4877]: I0128 17:08:36.980931 4877 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/951f6a86-2dbc-402b-bb10-9a16d347c697-server-conf\") on node \"crc\" DevicePath \"\"" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.055693 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-f8bsb"] Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.062201 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "951f6a86-2dbc-402b-bb10-9a16d347c697" (UID: "951f6a86-2dbc-402b-bb10-9a16d347c697"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.075847 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-f8bsb"] Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.076224 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.076277 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.084389 4877 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/951f6a86-2dbc-402b-bb10-9a16d347c697-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.129656 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6" (OuterVolumeSpecName: "persistence") pod "951f6a86-2dbc-402b-bb10-9a16d347c697" (UID: "951f6a86-2dbc-402b-bb10-9a16d347c697"). InnerVolumeSpecName "pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.188457 4877 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\") on node \"crc\" " Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.232110 4877 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.232283 4877 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6") on node "crc" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.291686 4877 reconciler_common.go:293] "Volume detached for volume \"pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\") on node \"crc\" DevicePath \"\"" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.348816 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27ba5678-96ac-4505-8313-84cab2d57434" path="/var/lib/kubelet/pods/27ba5678-96ac-4505-8313-84cab2d57434/volumes" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.537750 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.574182 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.588945 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.626034 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 17:08:37 crc kubenswrapper[4877]: E0128 17:08:37.628317 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="951f6a86-2dbc-402b-bb10-9a16d347c697" containerName="rabbitmq" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.628394 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="951f6a86-2dbc-402b-bb10-9a16d347c697" containerName="rabbitmq" Jan 28 17:08:37 crc kubenswrapper[4877]: E0128 17:08:37.628506 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="951f6a86-2dbc-402b-bb10-9a16d347c697" containerName="setup-container" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.628520 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="951f6a86-2dbc-402b-bb10-9a16d347c697" containerName="setup-container" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.629080 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="951f6a86-2dbc-402b-bb10-9a16d347c697" containerName="rabbitmq" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.649367 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.656361 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.704348 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/08ba4011-03db-40a0-94ee-f071bf9e76d9-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.704441 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/08ba4011-03db-40a0-94ee-f071bf9e76d9-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.704505 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/08ba4011-03db-40a0-94ee-f071bf9e76d9-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.704760 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/08ba4011-03db-40a0-94ee-f071bf9e76d9-config-data\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.704889 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/08ba4011-03db-40a0-94ee-f071bf9e76d9-pod-info\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.704965 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/08ba4011-03db-40a0-94ee-f071bf9e76d9-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.705095 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/08ba4011-03db-40a0-94ee-f071bf9e76d9-server-conf\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.705141 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.705282 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/08ba4011-03db-40a0-94ee-f071bf9e76d9-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.705413 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/08ba4011-03db-40a0-94ee-f071bf9e76d9-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.705497 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jmdl\" (UniqueName: \"kubernetes.io/projected/08ba4011-03db-40a0-94ee-f071bf9e76d9-kube-api-access-4jmdl\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.807140 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/08ba4011-03db-40a0-94ee-f071bf9e76d9-server-conf\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.807454 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.807526 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/08ba4011-03db-40a0-94ee-f071bf9e76d9-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.807583 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/08ba4011-03db-40a0-94ee-f071bf9e76d9-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.807612 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jmdl\" (UniqueName: \"kubernetes.io/projected/08ba4011-03db-40a0-94ee-f071bf9e76d9-kube-api-access-4jmdl\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.807658 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/08ba4011-03db-40a0-94ee-f071bf9e76d9-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.807693 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/08ba4011-03db-40a0-94ee-f071bf9e76d9-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.807722 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/08ba4011-03db-40a0-94ee-f071bf9e76d9-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.807767 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/08ba4011-03db-40a0-94ee-f071bf9e76d9-config-data\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.807807 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/08ba4011-03db-40a0-94ee-f071bf9e76d9-pod-info\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.807833 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/08ba4011-03db-40a0-94ee-f071bf9e76d9-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.808618 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/08ba4011-03db-40a0-94ee-f071bf9e76d9-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: 
I0128 17:08:37.808671 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/08ba4011-03db-40a0-94ee-f071bf9e76d9-server-conf\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.808698 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/08ba4011-03db-40a0-94ee-f071bf9e76d9-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.808760 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/08ba4011-03db-40a0-94ee-f071bf9e76d9-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.811089 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/08ba4011-03db-40a0-94ee-f071bf9e76d9-config-data\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.813150 4877 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.813187 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/445da7c8eee792eca514a7f00c533dabbf59427d0648942612a95e6cb358ee0f/globalmount\"" pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.813312 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/08ba4011-03db-40a0-94ee-f071bf9e76d9-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.817001 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/08ba4011-03db-40a0-94ee-f071bf9e76d9-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.817376 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/08ba4011-03db-40a0-94ee-f071bf9e76d9-pod-info\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.817422 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/08ba4011-03db-40a0-94ee-f071bf9e76d9-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: 
\"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.876051 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jmdl\" (UniqueName: \"kubernetes.io/projected/08ba4011-03db-40a0-94ee-f071bf9e76d9-kube-api-access-4jmdl\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.971836 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6abebcd2-7de4-4b3f-9a42-5acb705c43d6\") pod \"rabbitmq-server-0\" (UID: \"08ba4011-03db-40a0-94ee-f071bf9e76d9\") " pod="openstack/rabbitmq-server-0" Jan 28 17:08:37 crc kubenswrapper[4877]: I0128 17:08:37.981683 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 17:08:38 crc kubenswrapper[4877]: I0128 17:08:38.049655 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-v5fwc"] Jan 28 17:08:38 crc kubenswrapper[4877]: I0128 17:08:38.069270 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-d27d-account-create-update-vqv6l"] Jan 28 17:08:38 crc kubenswrapper[4877]: I0128 17:08:38.094565 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-d27d-account-create-update-vqv6l"] Jan 28 17:08:38 crc kubenswrapper[4877]: I0128 17:08:38.132206 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-v5fwc"] Jan 28 17:08:38 crc kubenswrapper[4877]: I0128 17:08:38.590456 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 17:08:39 crc kubenswrapper[4877]: I0128 17:08:39.349330 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="951f6a86-2dbc-402b-bb10-9a16d347c697" path="/var/lib/kubelet/pods/951f6a86-2dbc-402b-bb10-9a16d347c697/volumes" Jan 28 17:08:39 crc kubenswrapper[4877]: I0128 17:08:39.351994 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b658b847-5c12-40f3-8602-a0897a18066f" path="/var/lib/kubelet/pods/b658b847-5c12-40f3-8602-a0897a18066f/volumes" Jan 28 17:08:39 crc kubenswrapper[4877]: I0128 17:08:39.353224 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b73dca88-cd81-4389-86cf-dd7973a4e489" path="/var/lib/kubelet/pods/b73dca88-cd81-4389-86cf-dd7973a4e489/volumes" Jan 28 17:08:39 crc kubenswrapper[4877]: I0128 17:08:39.562678 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"08ba4011-03db-40a0-94ee-f071bf9e76d9","Type":"ContainerStarted","Data":"7a29bc8ee413785b0551ef796d0ec127ac08ced08a7952781023544de945d3b1"} Jan 28 17:08:40 crc kubenswrapper[4877]: I0128 17:08:40.040825 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-mxsg9"] Jan 28 17:08:40 crc kubenswrapper[4877]: I0128 17:08:40.056185 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-e6a7-account-create-update-f2jl2"] Jan 28 17:08:40 crc kubenswrapper[4877]: I0128 17:08:40.073988 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-526f-account-create-update-ngljr"] Jan 28 17:08:40 crc kubenswrapper[4877]: I0128 17:08:40.088410 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/barbican-db-create-qg5jt"] Jan 28 17:08:40 crc kubenswrapper[4877]: I0128 17:08:40.098967 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-qg5jt"] Jan 28 17:08:40 crc kubenswrapper[4877]: I0128 17:08:40.111539 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-d6a6-account-create-update-zc2m5"] Jan 28 17:08:40 crc kubenswrapper[4877]: I0128 17:08:40.128938 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-526f-account-create-update-ngljr"] Jan 28 17:08:40 crc kubenswrapper[4877]: I0128 17:08:40.146182 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-mxsg9"] Jan 28 17:08:40 crc kubenswrapper[4877]: I0128 17:08:40.164313 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-e6a7-account-create-update-f2jl2"] Jan 28 17:08:40 crc kubenswrapper[4877]: I0128 17:08:40.180013 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-d6a6-account-create-update-zc2m5"] Jan 28 17:08:40 crc kubenswrapper[4877]: I0128 17:08:40.576121 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"08ba4011-03db-40a0-94ee-f071bf9e76d9","Type":"ContainerStarted","Data":"71334db1a08402af71ac95a94832e600f9bcf77fca0dc0ac23852f9b9912c402"} Jan 28 17:08:41 crc kubenswrapper[4877]: I0128 17:08:41.346336 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6aa96069-1d33-441b-85e7-64c2a06b3860" path="/var/lib/kubelet/pods/6aa96069-1d33-441b-85e7-64c2a06b3860/volumes" Jan 28 17:08:41 crc kubenswrapper[4877]: I0128 17:08:41.349097 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f0118a1-6a58-42e3-b8af-bbc0bbd6777d" path="/var/lib/kubelet/pods/7f0118a1-6a58-42e3-b8af-bbc0bbd6777d/volumes" Jan 28 17:08:41 crc kubenswrapper[4877]: I0128 17:08:41.351141 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8be5d3d7-b0d0-47ba-906c-efe5b871f035" path="/var/lib/kubelet/pods/8be5d3d7-b0d0-47ba-906c-efe5b871f035/volumes" Jan 28 17:08:41 crc kubenswrapper[4877]: I0128 17:08:41.352461 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6c0346e-3510-4fa2-ac67-46bed9447c6b" path="/var/lib/kubelet/pods/d6c0346e-3510-4fa2-ac67-46bed9447c6b/volumes" Jan 28 17:08:41 crc kubenswrapper[4877]: I0128 17:08:41.356610 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f33aa46e-bc47-4499-b263-7a0152340bc4" path="/var/lib/kubelet/pods/f33aa46e-bc47-4499-b263-7a0152340bc4/volumes" Jan 28 17:08:46 crc kubenswrapper[4877]: I0128 17:08:46.039519 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-4rgth"] Jan 28 17:08:46 crc kubenswrapper[4877]: I0128 17:08:46.058737 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-4rgth"] Jan 28 17:08:47 crc kubenswrapper[4877]: I0128 17:08:47.346138 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26904586-2e5c-44de-b91c-0a6d288d6d9e" path="/var/lib/kubelet/pods/26904586-2e5c-44de-b91c-0a6d288d6d9e/volumes" Jan 28 17:09:07 crc kubenswrapper[4877]: I0128 17:09:07.076747 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= 
Jan 28 17:09:07 crc kubenswrapper[4877]: I0128 17:09:07.077227 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:09:12 crc kubenswrapper[4877]: I0128 17:09:12.975112 4877 generic.go:334] "Generic (PLEG): container finished" podID="08ba4011-03db-40a0-94ee-f071bf9e76d9" containerID="71334db1a08402af71ac95a94832e600f9bcf77fca0dc0ac23852f9b9912c402" exitCode=0 Jan 28 17:09:12 crc kubenswrapper[4877]: I0128 17:09:12.975347 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"08ba4011-03db-40a0-94ee-f071bf9e76d9","Type":"ContainerDied","Data":"71334db1a08402af71ac95a94832e600f9bcf77fca0dc0ac23852f9b9912c402"} Jan 28 17:09:13 crc kubenswrapper[4877]: I0128 17:09:13.988972 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"08ba4011-03db-40a0-94ee-f071bf9e76d9","Type":"ContainerStarted","Data":"59d5f4ac7a7448fda4b42a790fee9ca1ba7aca243f9daa406d2b8a43199a3526"} Jan 28 17:09:13 crc kubenswrapper[4877]: I0128 17:09:13.989740 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 28 17:09:14 crc kubenswrapper[4877]: I0128 17:09:14.026938 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.026916055 podStartE2EDuration="37.026916055s" podCreationTimestamp="2026-01-28 17:08:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:09:14.015009084 +0000 UTC m=+2057.573335972" watchObservedRunningTime="2026-01-28 17:09:14.026916055 +0000 UTC m=+2057.585242943" Jan 28 17:09:26 crc kubenswrapper[4877]: I0128 17:09:26.831351 4877 scope.go:117] "RemoveContainer" containerID="8000a63f1cb034db07b6545130e0d5fc12c64de9cf3fb2fd9984c8a6eea75d2b" Jan 28 17:09:26 crc kubenswrapper[4877]: I0128 17:09:26.869462 4877 scope.go:117] "RemoveContainer" containerID="fd0204b269633a3159eee494d9bcdccef134ea300f9c1ebc5cc001c7a7209c49" Jan 28 17:09:26 crc kubenswrapper[4877]: I0128 17:09:26.946766 4877 scope.go:117] "RemoveContainer" containerID="adbcd74da6cd3bd303e7b6b4214a66233e04c4a5fcf9e5dae03270a1ddef7a09" Jan 28 17:09:27 crc kubenswrapper[4877]: I0128 17:09:27.006445 4877 scope.go:117] "RemoveContainer" containerID="3e8d6e228c3d7e3bc8b5bae2522e027386953e5c58e9387d7140f5ee18f9962b" Jan 28 17:09:27 crc kubenswrapper[4877]: I0128 17:09:27.064668 4877 scope.go:117] "RemoveContainer" containerID="ddce209fc785587cb17240da78fe293bc3162cc21168d9e0a6ffcba87c4db0cb" Jan 28 17:09:27 crc kubenswrapper[4877]: I0128 17:09:27.178788 4877 scope.go:117] "RemoveContainer" containerID="ada214a4032c423787513923e28048ce45bcac98352a8f888a2adf6e2fe15c89" Jan 28 17:09:27 crc kubenswrapper[4877]: I0128 17:09:27.237440 4877 scope.go:117] "RemoveContainer" containerID="fbd4fe8af9b9adc070ca706800ff14bed5a29c3c5d41b226d95526ec439ed16d" Jan 28 17:09:27 crc kubenswrapper[4877]: I0128 17:09:27.267101 4877 scope.go:117] "RemoveContainer" containerID="4814d6a51261f73fb6d1a6d3f616147b0069871f1e5588245254e80036c4c35d" Jan 28 17:09:27 crc kubenswrapper[4877]: I0128 17:09:27.344449 4877 scope.go:117] "RemoveContainer" 
containerID="6a3391eb336cb96ec928f057e9c3b5501fe31809d533255e31189ca9860b0a49" Jan 28 17:09:27 crc kubenswrapper[4877]: I0128 17:09:27.377686 4877 scope.go:117] "RemoveContainer" containerID="8ef2c662e9cfafd69c72e1ddd8c5e00cc88fd087a6b6090f6f2bd73fa71f67ce" Jan 28 17:09:27 crc kubenswrapper[4877]: I0128 17:09:27.436080 4877 scope.go:117] "RemoveContainer" containerID="5d36b2a06fe50bbb399287c88c54c7a07f7826a44098f1b106dee87fb3fa968f" Jan 28 17:09:27 crc kubenswrapper[4877]: I0128 17:09:27.473714 4877 scope.go:117] "RemoveContainer" containerID="337b44ba67d4d100a74e91e853b309c31b28a5925eea01a43b24d192e0a1790a" Jan 28 17:09:27 crc kubenswrapper[4877]: I0128 17:09:27.623998 4877 scope.go:117] "RemoveContainer" containerID="62d6f23fa596fad2bdb306188ca1e9b4614a956a602f23075b1319f3b40f752f" Jan 28 17:09:27 crc kubenswrapper[4877]: I0128 17:09:27.658423 4877 scope.go:117] "RemoveContainer" containerID="b01a3faf5022f37e2d05c5373f278b936ce816b9887f8b193ab5db224f90a560" Jan 28 17:09:27 crc kubenswrapper[4877]: I0128 17:09:27.780385 4877 scope.go:117] "RemoveContainer" containerID="258d9070cf54905c414efaf45be6180e8c26a8d40f942989face354b81e61ddd" Jan 28 17:09:27 crc kubenswrapper[4877]: I0128 17:09:27.807291 4877 scope.go:117] "RemoveContainer" containerID="3b09f7d0955c178d2f7cc818b6c262d984c7364ec0cdc0e57f632b977dd1613c" Jan 28 17:09:27 crc kubenswrapper[4877]: I0128 17:09:27.837137 4877 scope.go:117] "RemoveContainer" containerID="c3138c25344035be31cb31f1f3aea8098572e5eb4087600f89d10726f58a58d7" Jan 28 17:09:28 crc kubenswrapper[4877]: I0128 17:09:28.001934 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 28 17:09:37 crc kubenswrapper[4877]: I0128 17:09:37.076731 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:09:37 crc kubenswrapper[4877]: I0128 17:09:37.077428 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:09:37 crc kubenswrapper[4877]: I0128 17:09:37.077507 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 17:09:37 crc kubenswrapper[4877]: I0128 17:09:37.078464 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c7f0577d72207bd89c21d8eb9092633da62ac51cdf714b1a1e5d82fcc0a22555"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 17:09:37 crc kubenswrapper[4877]: I0128 17:09:37.078546 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" containerID="cri-o://c7f0577d72207bd89c21d8eb9092633da62ac51cdf714b1a1e5d82fcc0a22555" gracePeriod=600 Jan 28 17:09:37 crc kubenswrapper[4877]: I0128 17:09:37.278323 4877 generic.go:334] 
"Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="c7f0577d72207bd89c21d8eb9092633da62ac51cdf714b1a1e5d82fcc0a22555" exitCode=0 Jan 28 17:09:37 crc kubenswrapper[4877]: I0128 17:09:37.278672 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"c7f0577d72207bd89c21d8eb9092633da62ac51cdf714b1a1e5d82fcc0a22555"} Jan 28 17:09:37 crc kubenswrapper[4877]: I0128 17:09:37.278715 4877 scope.go:117] "RemoveContainer" containerID="8aa11c50cfa5756b80d7980a5b5cb00101ae50415ac6aeb080fb12ec31f31c82" Jan 28 17:09:38 crc kubenswrapper[4877]: I0128 17:09:38.294299 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3"} Jan 28 17:09:49 crc kubenswrapper[4877]: I0128 17:09:49.045496 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-g4wc7"] Jan 28 17:09:49 crc kubenswrapper[4877]: I0128 17:09:49.060249 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-g4wc7"] Jan 28 17:09:49 crc kubenswrapper[4877]: I0128 17:09:49.346547 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="424aed47-f19b-40f2-b8c1-dfc24b8d605d" path="/var/lib/kubelet/pods/424aed47-f19b-40f2-b8c1-dfc24b8d605d/volumes" Jan 28 17:10:28 crc kubenswrapper[4877]: I0128 17:10:28.548537 4877 scope.go:117] "RemoveContainer" containerID="c3b055a7054faeb73cdfbfab890a5b3cf062179dcd2635991b2ffa97c535f6b7" Jan 28 17:10:28 crc kubenswrapper[4877]: I0128 17:10:28.586540 4877 scope.go:117] "RemoveContainer" containerID="2203cc83e2964ab1aa11525d5c20cae3b5e49a9c84e8069d807b07a1456f5448" Jan 28 17:10:28 crc kubenswrapper[4877]: I0128 17:10:28.613503 4877 scope.go:117] "RemoveContainer" containerID="b20aadef947101060d0f434ca40a14d9aa659021699079e8eacf772a9e90e62b" Jan 28 17:10:51 crc kubenswrapper[4877]: I0128 17:10:51.044856 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-lptvh"] Jan 28 17:10:51 crc kubenswrapper[4877]: I0128 17:10:51.058257 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-lptvh"] Jan 28 17:10:51 crc kubenswrapper[4877]: I0128 17:10:51.346077 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="534e973d-d29f-4aac-8922-5f42d27c0770" path="/var/lib/kubelet/pods/534e973d-d29f-4aac-8922-5f42d27c0770/volumes" Jan 28 17:10:52 crc kubenswrapper[4877]: I0128 17:10:52.032432 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-lsxs2"] Jan 28 17:10:52 crc kubenswrapper[4877]: I0128 17:10:52.046348 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-lsxs2"] Jan 28 17:10:53 crc kubenswrapper[4877]: I0128 17:10:53.370682 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a32bcea9-5341-40a1-9715-a43829459366" path="/var/lib/kubelet/pods/a32bcea9-5341-40a1-9715-a43829459366/volumes" Jan 28 17:11:06 crc kubenswrapper[4877]: I0128 17:11:06.043690 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-6hfcz"] Jan 28 17:11:06 crc kubenswrapper[4877]: I0128 17:11:06.058591 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/barbican-db-sync-6hfcz"] Jan 28 17:11:07 crc kubenswrapper[4877]: I0128 17:11:07.345605 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffc3b7f9-8297-46ac-b550-d61d9513187c" path="/var/lib/kubelet/pods/ffc3b7f9-8297-46ac-b550-d61d9513187c/volumes" Jan 28 17:11:11 crc kubenswrapper[4877]: I0128 17:11:11.101693 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kjnhj"] Jan 28 17:11:11 crc kubenswrapper[4877]: I0128 17:11:11.105012 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:11 crc kubenswrapper[4877]: I0128 17:11:11.119295 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kjnhj"] Jan 28 17:11:11 crc kubenswrapper[4877]: I0128 17:11:11.208765 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ph89s\" (UniqueName: \"kubernetes.io/projected/a2933622-09bb-4395-baf4-32cae3257b5f-kube-api-access-ph89s\") pod \"redhat-marketplace-kjnhj\" (UID: \"a2933622-09bb-4395-baf4-32cae3257b5f\") " pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:11 crc kubenswrapper[4877]: I0128 17:11:11.209000 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2933622-09bb-4395-baf4-32cae3257b5f-utilities\") pod \"redhat-marketplace-kjnhj\" (UID: \"a2933622-09bb-4395-baf4-32cae3257b5f\") " pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:11 crc kubenswrapper[4877]: I0128 17:11:11.209156 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2933622-09bb-4395-baf4-32cae3257b5f-catalog-content\") pod \"redhat-marketplace-kjnhj\" (UID: \"a2933622-09bb-4395-baf4-32cae3257b5f\") " pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:11 crc kubenswrapper[4877]: I0128 17:11:11.311839 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2933622-09bb-4395-baf4-32cae3257b5f-catalog-content\") pod \"redhat-marketplace-kjnhj\" (UID: \"a2933622-09bb-4395-baf4-32cae3257b5f\") " pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:11 crc kubenswrapper[4877]: I0128 17:11:11.312110 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ph89s\" (UniqueName: \"kubernetes.io/projected/a2933622-09bb-4395-baf4-32cae3257b5f-kube-api-access-ph89s\") pod \"redhat-marketplace-kjnhj\" (UID: \"a2933622-09bb-4395-baf4-32cae3257b5f\") " pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:11 crc kubenswrapper[4877]: I0128 17:11:11.312147 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2933622-09bb-4395-baf4-32cae3257b5f-utilities\") pod \"redhat-marketplace-kjnhj\" (UID: \"a2933622-09bb-4395-baf4-32cae3257b5f\") " pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:11 crc kubenswrapper[4877]: I0128 17:11:11.312496 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2933622-09bb-4395-baf4-32cae3257b5f-catalog-content\") pod \"redhat-marketplace-kjnhj\" (UID: 
\"a2933622-09bb-4395-baf4-32cae3257b5f\") " pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:11 crc kubenswrapper[4877]: I0128 17:11:11.312640 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2933622-09bb-4395-baf4-32cae3257b5f-utilities\") pod \"redhat-marketplace-kjnhj\" (UID: \"a2933622-09bb-4395-baf4-32cae3257b5f\") " pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:11 crc kubenswrapper[4877]: I0128 17:11:11.339713 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ph89s\" (UniqueName: \"kubernetes.io/projected/a2933622-09bb-4395-baf4-32cae3257b5f-kube-api-access-ph89s\") pod \"redhat-marketplace-kjnhj\" (UID: \"a2933622-09bb-4395-baf4-32cae3257b5f\") " pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:11 crc kubenswrapper[4877]: I0128 17:11:11.447138 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:11 crc kubenswrapper[4877]: I0128 17:11:11.976009 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kjnhj"] Jan 28 17:11:12 crc kubenswrapper[4877]: I0128 17:11:12.325247 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjnhj" event={"ID":"a2933622-09bb-4395-baf4-32cae3257b5f","Type":"ContainerStarted","Data":"82ea7f0aa9657ef71f8bd5afaab23962a19ec28efd2c67b743a2f3a9a9dbeaac"} Jan 28 17:11:13 crc kubenswrapper[4877]: I0128 17:11:13.342811 4877 generic.go:334] "Generic (PLEG): container finished" podID="a2933622-09bb-4395-baf4-32cae3257b5f" containerID="8e4b8a241a7b1ccf7c6aa8a8f2d84898472453af4668859a56b8e17c0c04a2c9" exitCode=0 Jan 28 17:11:13 crc kubenswrapper[4877]: I0128 17:11:13.348512 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjnhj" event={"ID":"a2933622-09bb-4395-baf4-32cae3257b5f","Type":"ContainerDied","Data":"8e4b8a241a7b1ccf7c6aa8a8f2d84898472453af4668859a56b8e17c0c04a2c9"} Jan 28 17:11:15 crc kubenswrapper[4877]: I0128 17:11:15.368160 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjnhj" event={"ID":"a2933622-09bb-4395-baf4-32cae3257b5f","Type":"ContainerStarted","Data":"f9b8397be44d1b70c08e5f44b727111068931ae457bbd16a1ff390a8036e737c"} Jan 28 17:11:16 crc kubenswrapper[4877]: I0128 17:11:16.036105 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-957zv"] Jan 28 17:11:16 crc kubenswrapper[4877]: I0128 17:11:16.050167 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-957zv"] Jan 28 17:11:17 crc kubenswrapper[4877]: I0128 17:11:17.030217 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-kfq2d"] Jan 28 17:11:17 crc kubenswrapper[4877]: I0128 17:11:17.043165 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-kfq2d"] Jan 28 17:11:17 crc kubenswrapper[4877]: I0128 17:11:17.344841 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6becdddb-915e-40e0-ba03-9de124ad56c7" path="/var/lib/kubelet/pods/6becdddb-915e-40e0-ba03-9de124ad56c7/volumes" Jan 28 17:11:17 crc kubenswrapper[4877]: I0128 17:11:17.350834 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dcc64de1-66f2-48e6-969f-61aa68773678" 
path="/var/lib/kubelet/pods/dcc64de1-66f2-48e6-969f-61aa68773678/volumes" Jan 28 17:11:17 crc kubenswrapper[4877]: I0128 17:11:17.392962 4877 generic.go:334] "Generic (PLEG): container finished" podID="a2933622-09bb-4395-baf4-32cae3257b5f" containerID="f9b8397be44d1b70c08e5f44b727111068931ae457bbd16a1ff390a8036e737c" exitCode=0 Jan 28 17:11:17 crc kubenswrapper[4877]: I0128 17:11:17.393016 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjnhj" event={"ID":"a2933622-09bb-4395-baf4-32cae3257b5f","Type":"ContainerDied","Data":"f9b8397be44d1b70c08e5f44b727111068931ae457bbd16a1ff390a8036e737c"} Jan 28 17:11:19 crc kubenswrapper[4877]: I0128 17:11:19.421742 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjnhj" event={"ID":"a2933622-09bb-4395-baf4-32cae3257b5f","Type":"ContainerStarted","Data":"81fe9e3b14240961c59bfd5bbd53712c117d1b3418bb2e518f0f3a7cbdc309bb"} Jan 28 17:11:19 crc kubenswrapper[4877]: I0128 17:11:19.453431 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kjnhj" podStartSLOduration=3.037650294 podStartE2EDuration="8.453406044s" podCreationTimestamp="2026-01-28 17:11:11 +0000 UTC" firstStartedPulling="2026-01-28 17:11:13.347028553 +0000 UTC m=+2176.905355441" lastFinishedPulling="2026-01-28 17:11:18.762784303 +0000 UTC m=+2182.321111191" observedRunningTime="2026-01-28 17:11:19.446462547 +0000 UTC m=+2183.004789445" watchObservedRunningTime="2026-01-28 17:11:19.453406044 +0000 UTC m=+2183.011732932" Jan 28 17:11:21 crc kubenswrapper[4877]: I0128 17:11:21.448123 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:21 crc kubenswrapper[4877]: I0128 17:11:21.448450 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:22 crc kubenswrapper[4877]: I0128 17:11:22.497259 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-kjnhj" podUID="a2933622-09bb-4395-baf4-32cae3257b5f" containerName="registry-server" probeResult="failure" output=< Jan 28 17:11:22 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:11:22 crc kubenswrapper[4877]: > Jan 28 17:11:28 crc kubenswrapper[4877]: I0128 17:11:28.699232 4877 scope.go:117] "RemoveContainer" containerID="2a6bcb2c9bee3ea8986746fd04aa1f77232ae6fe9b206c22bb8a12505700f928" Jan 28 17:11:28 crc kubenswrapper[4877]: I0128 17:11:28.729810 4877 scope.go:117] "RemoveContainer" containerID="cb0f7f42332faecb63ab5cac702d89613ab5ae387157d537a0150334bccae91f" Jan 28 17:11:28 crc kubenswrapper[4877]: I0128 17:11:28.794019 4877 scope.go:117] "RemoveContainer" containerID="d67867eb4e420db2210dc7a867be431ead9a5ee36c8f4aeb0e89367ae7b38663" Jan 28 17:11:28 crc kubenswrapper[4877]: I0128 17:11:28.857966 4877 scope.go:117] "RemoveContainer" containerID="08f7c5897d2ff879abbf30d741b68b0330db81c5cc4d9849652a8f75af4661dc" Jan 28 17:11:28 crc kubenswrapper[4877]: I0128 17:11:28.920272 4877 scope.go:117] "RemoveContainer" containerID="6f8ccc3cfb7937a158f3c5e2724dae140de84091b694c21a2c9bb240940d4b6d" Jan 28 17:11:31 crc kubenswrapper[4877]: I0128 17:11:31.496659 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:31 crc kubenswrapper[4877]: I0128 17:11:31.569956 4877 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:31 crc kubenswrapper[4877]: I0128 17:11:31.739587 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kjnhj"] Jan 28 17:11:32 crc kubenswrapper[4877]: I0128 17:11:32.553539 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kjnhj" podUID="a2933622-09bb-4395-baf4-32cae3257b5f" containerName="registry-server" containerID="cri-o://81fe9e3b14240961c59bfd5bbd53712c117d1b3418bb2e518f0f3a7cbdc309bb" gracePeriod=2 Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.083074 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.099790 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2933622-09bb-4395-baf4-32cae3257b5f-catalog-content\") pod \"a2933622-09bb-4395-baf4-32cae3257b5f\" (UID: \"a2933622-09bb-4395-baf4-32cae3257b5f\") " Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.099894 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2933622-09bb-4395-baf4-32cae3257b5f-utilities\") pod \"a2933622-09bb-4395-baf4-32cae3257b5f\" (UID: \"a2933622-09bb-4395-baf4-32cae3257b5f\") " Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.100778 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ph89s\" (UniqueName: \"kubernetes.io/projected/a2933622-09bb-4395-baf4-32cae3257b5f-kube-api-access-ph89s\") pod \"a2933622-09bb-4395-baf4-32cae3257b5f\" (UID: \"a2933622-09bb-4395-baf4-32cae3257b5f\") " Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.101458 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2933622-09bb-4395-baf4-32cae3257b5f-utilities" (OuterVolumeSpecName: "utilities") pod "a2933622-09bb-4395-baf4-32cae3257b5f" (UID: "a2933622-09bb-4395-baf4-32cae3257b5f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.102359 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2933622-09bb-4395-baf4-32cae3257b5f-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.107821 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2933622-09bb-4395-baf4-32cae3257b5f-kube-api-access-ph89s" (OuterVolumeSpecName: "kube-api-access-ph89s") pod "a2933622-09bb-4395-baf4-32cae3257b5f" (UID: "a2933622-09bb-4395-baf4-32cae3257b5f"). InnerVolumeSpecName "kube-api-access-ph89s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.121809 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2933622-09bb-4395-baf4-32cae3257b5f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a2933622-09bb-4395-baf4-32cae3257b5f" (UID: "a2933622-09bb-4395-baf4-32cae3257b5f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.203845 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2933622-09bb-4395-baf4-32cae3257b5f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.203885 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ph89s\" (UniqueName: \"kubernetes.io/projected/a2933622-09bb-4395-baf4-32cae3257b5f-kube-api-access-ph89s\") on node \"crc\" DevicePath \"\"" Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.593825 4877 generic.go:334] "Generic (PLEG): container finished" podID="a2933622-09bb-4395-baf4-32cae3257b5f" containerID="81fe9e3b14240961c59bfd5bbd53712c117d1b3418bb2e518f0f3a7cbdc309bb" exitCode=0 Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.593890 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjnhj" event={"ID":"a2933622-09bb-4395-baf4-32cae3257b5f","Type":"ContainerDied","Data":"81fe9e3b14240961c59bfd5bbd53712c117d1b3418bb2e518f0f3a7cbdc309bb"} Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.593928 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kjnhj" event={"ID":"a2933622-09bb-4395-baf4-32cae3257b5f","Type":"ContainerDied","Data":"82ea7f0aa9657ef71f8bd5afaab23962a19ec28efd2c67b743a2f3a9a9dbeaac"} Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.593967 4877 scope.go:117] "RemoveContainer" containerID="81fe9e3b14240961c59bfd5bbd53712c117d1b3418bb2e518f0f3a7cbdc309bb" Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.594285 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kjnhj" Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.635554 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kjnhj"] Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.638180 4877 scope.go:117] "RemoveContainer" containerID="f9b8397be44d1b70c08e5f44b727111068931ae457bbd16a1ff390a8036e737c" Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.652081 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kjnhj"] Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.667639 4877 scope.go:117] "RemoveContainer" containerID="8e4b8a241a7b1ccf7c6aa8a8f2d84898472453af4668859a56b8e17c0c04a2c9" Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.728234 4877 scope.go:117] "RemoveContainer" containerID="81fe9e3b14240961c59bfd5bbd53712c117d1b3418bb2e518f0f3a7cbdc309bb" Jan 28 17:11:33 crc kubenswrapper[4877]: E0128 17:11:33.728985 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81fe9e3b14240961c59bfd5bbd53712c117d1b3418bb2e518f0f3a7cbdc309bb\": container with ID starting with 81fe9e3b14240961c59bfd5bbd53712c117d1b3418bb2e518f0f3a7cbdc309bb not found: ID does not exist" containerID="81fe9e3b14240961c59bfd5bbd53712c117d1b3418bb2e518f0f3a7cbdc309bb" Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.729039 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81fe9e3b14240961c59bfd5bbd53712c117d1b3418bb2e518f0f3a7cbdc309bb"} err="failed to get container status \"81fe9e3b14240961c59bfd5bbd53712c117d1b3418bb2e518f0f3a7cbdc309bb\": rpc error: code = NotFound desc = could not find container \"81fe9e3b14240961c59bfd5bbd53712c117d1b3418bb2e518f0f3a7cbdc309bb\": container with ID starting with 81fe9e3b14240961c59bfd5bbd53712c117d1b3418bb2e518f0f3a7cbdc309bb not found: ID does not exist" Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.729066 4877 scope.go:117] "RemoveContainer" containerID="f9b8397be44d1b70c08e5f44b727111068931ae457bbd16a1ff390a8036e737c" Jan 28 17:11:33 crc kubenswrapper[4877]: E0128 17:11:33.729403 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9b8397be44d1b70c08e5f44b727111068931ae457bbd16a1ff390a8036e737c\": container with ID starting with f9b8397be44d1b70c08e5f44b727111068931ae457bbd16a1ff390a8036e737c not found: ID does not exist" containerID="f9b8397be44d1b70c08e5f44b727111068931ae457bbd16a1ff390a8036e737c" Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.729450 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9b8397be44d1b70c08e5f44b727111068931ae457bbd16a1ff390a8036e737c"} err="failed to get container status \"f9b8397be44d1b70c08e5f44b727111068931ae457bbd16a1ff390a8036e737c\": rpc error: code = NotFound desc = could not find container \"f9b8397be44d1b70c08e5f44b727111068931ae457bbd16a1ff390a8036e737c\": container with ID starting with f9b8397be44d1b70c08e5f44b727111068931ae457bbd16a1ff390a8036e737c not found: ID does not exist" Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.729516 4877 scope.go:117] "RemoveContainer" containerID="8e4b8a241a7b1ccf7c6aa8a8f2d84898472453af4668859a56b8e17c0c04a2c9" Jan 28 17:11:33 crc kubenswrapper[4877]: E0128 17:11:33.729837 4877 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"8e4b8a241a7b1ccf7c6aa8a8f2d84898472453af4668859a56b8e17c0c04a2c9\": container with ID starting with 8e4b8a241a7b1ccf7c6aa8a8f2d84898472453af4668859a56b8e17c0c04a2c9 not found: ID does not exist" containerID="8e4b8a241a7b1ccf7c6aa8a8f2d84898472453af4668859a56b8e17c0c04a2c9" Jan 28 17:11:33 crc kubenswrapper[4877]: I0128 17:11:33.729881 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e4b8a241a7b1ccf7c6aa8a8f2d84898472453af4668859a56b8e17c0c04a2c9"} err="failed to get container status \"8e4b8a241a7b1ccf7c6aa8a8f2d84898472453af4668859a56b8e17c0c04a2c9\": rpc error: code = NotFound desc = could not find container \"8e4b8a241a7b1ccf7c6aa8a8f2d84898472453af4668859a56b8e17c0c04a2c9\": container with ID starting with 8e4b8a241a7b1ccf7c6aa8a8f2d84898472453af4668859a56b8e17c0c04a2c9 not found: ID does not exist" Jan 28 17:11:35 crc kubenswrapper[4877]: I0128 17:11:35.344375 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2933622-09bb-4395-baf4-32cae3257b5f" path="/var/lib/kubelet/pods/a2933622-09bb-4395-baf4-32cae3257b5f/volumes" Jan 28 17:11:37 crc kubenswrapper[4877]: I0128 17:11:37.076875 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:11:37 crc kubenswrapper[4877]: I0128 17:11:37.077758 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:11:45 crc kubenswrapper[4877]: I0128 17:11:45.057673 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-4wxn6"] Jan 28 17:11:45 crc kubenswrapper[4877]: I0128 17:11:45.069794 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-4wxn6"] Jan 28 17:11:45 crc kubenswrapper[4877]: I0128 17:11:45.343838 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc6b6b48-855c-412b-af8b-be4c27962c4b" path="/var/lib/kubelet/pods/dc6b6b48-855c-412b-af8b-be4c27962c4b/volumes" Jan 28 17:11:49 crc kubenswrapper[4877]: I0128 17:11:49.071013 4877 generic.go:334] "Generic (PLEG): container finished" podID="1c676b41-42b6-44a8-92b5-ee8f883e2793" containerID="41699e90fc11b99784b887805a4624cbff8f8c1924054515b3fe2fadc209f971" exitCode=0 Jan 28 17:11:49 crc kubenswrapper[4877]: I0128 17:11:49.071611 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" event={"ID":"1c676b41-42b6-44a8-92b5-ee8f883e2793","Type":"ContainerDied","Data":"41699e90fc11b99784b887805a4624cbff8f8c1924054515b3fe2fadc209f971"} Jan 28 17:11:50 crc kubenswrapper[4877]: I0128 17:11:50.581882 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" Jan 28 17:11:50 crc kubenswrapper[4877]: I0128 17:11:50.714491 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-ssh-key-openstack-edpm-ipam\") pod \"1c676b41-42b6-44a8-92b5-ee8f883e2793\" (UID: \"1c676b41-42b6-44a8-92b5-ee8f883e2793\") " Jan 28 17:11:50 crc kubenswrapper[4877]: I0128 17:11:50.714949 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-inventory\") pod \"1c676b41-42b6-44a8-92b5-ee8f883e2793\" (UID: \"1c676b41-42b6-44a8-92b5-ee8f883e2793\") " Jan 28 17:11:50 crc kubenswrapper[4877]: I0128 17:11:50.715740 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-bootstrap-combined-ca-bundle\") pod \"1c676b41-42b6-44a8-92b5-ee8f883e2793\" (UID: \"1c676b41-42b6-44a8-92b5-ee8f883e2793\") " Jan 28 17:11:50 crc kubenswrapper[4877]: I0128 17:11:50.715972 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6t4qj\" (UniqueName: \"kubernetes.io/projected/1c676b41-42b6-44a8-92b5-ee8f883e2793-kube-api-access-6t4qj\") pod \"1c676b41-42b6-44a8-92b5-ee8f883e2793\" (UID: \"1c676b41-42b6-44a8-92b5-ee8f883e2793\") " Jan 28 17:11:50 crc kubenswrapper[4877]: I0128 17:11:50.722111 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "1c676b41-42b6-44a8-92b5-ee8f883e2793" (UID: "1c676b41-42b6-44a8-92b5-ee8f883e2793"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:11:50 crc kubenswrapper[4877]: I0128 17:11:50.722310 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c676b41-42b6-44a8-92b5-ee8f883e2793-kube-api-access-6t4qj" (OuterVolumeSpecName: "kube-api-access-6t4qj") pod "1c676b41-42b6-44a8-92b5-ee8f883e2793" (UID: "1c676b41-42b6-44a8-92b5-ee8f883e2793"). InnerVolumeSpecName "kube-api-access-6t4qj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:11:50 crc kubenswrapper[4877]: I0128 17:11:50.755400 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-inventory" (OuterVolumeSpecName: "inventory") pod "1c676b41-42b6-44a8-92b5-ee8f883e2793" (UID: "1c676b41-42b6-44a8-92b5-ee8f883e2793"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:11:50 crc kubenswrapper[4877]: I0128 17:11:50.759787 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "1c676b41-42b6-44a8-92b5-ee8f883e2793" (UID: "1c676b41-42b6-44a8-92b5-ee8f883e2793"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:11:50 crc kubenswrapper[4877]: I0128 17:11:50.820268 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6t4qj\" (UniqueName: \"kubernetes.io/projected/1c676b41-42b6-44a8-92b5-ee8f883e2793-kube-api-access-6t4qj\") on node \"crc\" DevicePath \"\"" Jan 28 17:11:50 crc kubenswrapper[4877]: I0128 17:11:50.820325 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:11:50 crc kubenswrapper[4877]: I0128 17:11:50.820341 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:11:50 crc kubenswrapper[4877]: I0128 17:11:50.820352 4877 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c676b41-42b6-44a8-92b5-ee8f883e2793-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.096275 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" event={"ID":"1c676b41-42b6-44a8-92b5-ee8f883e2793","Type":"ContainerDied","Data":"0f6a5ae339ea31001a622f2e480d28c3a5d6531c84a40271eee6ffc7302fd37c"} Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.096546 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f6a5ae339ea31001a622f2e480d28c3a5d6531c84a40271eee6ffc7302fd37c" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.096323 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-56kcf" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.185224 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v"] Jan 28 17:11:51 crc kubenswrapper[4877]: E0128 17:11:51.185810 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2933622-09bb-4395-baf4-32cae3257b5f" containerName="extract-content" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.185832 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2933622-09bb-4395-baf4-32cae3257b5f" containerName="extract-content" Jan 28 17:11:51 crc kubenswrapper[4877]: E0128 17:11:51.185850 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c676b41-42b6-44a8-92b5-ee8f883e2793" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.185857 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c676b41-42b6-44a8-92b5-ee8f883e2793" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 28 17:11:51 crc kubenswrapper[4877]: E0128 17:11:51.185897 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2933622-09bb-4395-baf4-32cae3257b5f" containerName="registry-server" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.185903 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2933622-09bb-4395-baf4-32cae3257b5f" containerName="registry-server" Jan 28 17:11:51 crc kubenswrapper[4877]: E0128 17:11:51.185921 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2933622-09bb-4395-baf4-32cae3257b5f" containerName="extract-utilities" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.185927 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2933622-09bb-4395-baf4-32cae3257b5f" containerName="extract-utilities" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.186179 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2933622-09bb-4395-baf4-32cae3257b5f" containerName="registry-server" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.186195 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c676b41-42b6-44a8-92b5-ee8f883e2793" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.187139 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.190092 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.190305 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.198556 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.202852 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.212999 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v"] Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.338044 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n28tr\" (UniqueName: \"kubernetes.io/projected/4e93507a-6405-4506-8ff9-c832187ec6f2-kube-api-access-n28tr\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-phn2v\" (UID: \"4e93507a-6405-4506-8ff9-c832187ec6f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.338128 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4e93507a-6405-4506-8ff9-c832187ec6f2-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-phn2v\" (UID: \"4e93507a-6405-4506-8ff9-c832187ec6f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.338306 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e93507a-6405-4506-8ff9-c832187ec6f2-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-phn2v\" (UID: \"4e93507a-6405-4506-8ff9-c832187ec6f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.441120 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e93507a-6405-4506-8ff9-c832187ec6f2-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-phn2v\" (UID: \"4e93507a-6405-4506-8ff9-c832187ec6f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.441511 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n28tr\" (UniqueName: \"kubernetes.io/projected/4e93507a-6405-4506-8ff9-c832187ec6f2-kube-api-access-n28tr\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-phn2v\" (UID: \"4e93507a-6405-4506-8ff9-c832187ec6f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.441578 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/4e93507a-6405-4506-8ff9-c832187ec6f2-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-phn2v\" (UID: \"4e93507a-6405-4506-8ff9-c832187ec6f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.446715 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4e93507a-6405-4506-8ff9-c832187ec6f2-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-phn2v\" (UID: \"4e93507a-6405-4506-8ff9-c832187ec6f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.447438 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e93507a-6405-4506-8ff9-c832187ec6f2-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-phn2v\" (UID: \"4e93507a-6405-4506-8ff9-c832187ec6f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.460249 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n28tr\" (UniqueName: \"kubernetes.io/projected/4e93507a-6405-4506-8ff9-c832187ec6f2-kube-api-access-n28tr\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-phn2v\" (UID: \"4e93507a-6405-4506-8ff9-c832187ec6f2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" Jan 28 17:11:51 crc kubenswrapper[4877]: I0128 17:11:51.507040 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" Jan 28 17:11:52 crc kubenswrapper[4877]: I0128 17:11:52.073373 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v"] Jan 28 17:11:52 crc kubenswrapper[4877]: I0128 17:11:52.107992 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" event={"ID":"4e93507a-6405-4506-8ff9-c832187ec6f2","Type":"ContainerStarted","Data":"39502d8b1e911e263f29a7b62b3af83a12114a369bf1912ce22101b43b34190f"} Jan 28 17:11:53 crc kubenswrapper[4877]: I0128 17:11:53.120782 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" event={"ID":"4e93507a-6405-4506-8ff9-c832187ec6f2","Type":"ContainerStarted","Data":"a93a633ae344e9ee311d95d7840a62f40f83d80bcce883fa1e63dd85fd7db232"} Jan 28 17:11:53 crc kubenswrapper[4877]: I0128 17:11:53.142773 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" podStartSLOduration=1.6713287879999998 podStartE2EDuration="2.142756722s" podCreationTimestamp="2026-01-28 17:11:51 +0000 UTC" firstStartedPulling="2026-01-28 17:11:52.068738775 +0000 UTC m=+2215.627065663" lastFinishedPulling="2026-01-28 17:11:52.540166709 +0000 UTC m=+2216.098493597" observedRunningTime="2026-01-28 17:11:53.135647441 +0000 UTC m=+2216.693974349" watchObservedRunningTime="2026-01-28 17:11:53.142756722 +0000 UTC m=+2216.701083610" Jan 28 17:11:56 crc kubenswrapper[4877]: I0128 17:11:56.039088 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-417f-account-create-update-d9sds"] Jan 28 
Jan 28 17:11:56 crc kubenswrapper[4877]: I0128 17:11:56.056907 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-417f-account-create-update-d9sds"]
Jan 28 17:11:57 crc kubenswrapper[4877]: I0128 17:11:57.039431 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-71e2-account-create-update-xnpjr"]
Jan 28 17:11:57 crc kubenswrapper[4877]: I0128 17:11:57.050574 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-ph4bn"]
Jan 28 17:11:57 crc kubenswrapper[4877]: I0128 17:11:57.063431 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-71e2-account-create-update-xnpjr"]
Jan 28 17:11:57 crc kubenswrapper[4877]: I0128 17:11:57.076244 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-tkw8b"]
Jan 28 17:11:57 crc kubenswrapper[4877]: I0128 17:11:57.087350 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-dfxx8"]
Jan 28 17:11:57 crc kubenswrapper[4877]: I0128 17:11:57.097274 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-ph4bn"]
Jan 28 17:11:57 crc kubenswrapper[4877]: I0128 17:11:57.107571 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-c6e0-account-create-update-stjf9"]
Jan 28 17:11:57 crc kubenswrapper[4877]: I0128 17:11:57.120036 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-tkw8b"]
Jan 28 17:11:57 crc kubenswrapper[4877]: I0128 17:11:57.130644 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-dfxx8"]
Jan 28 17:11:57 crc kubenswrapper[4877]: I0128 17:11:57.141384 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-c6e0-account-create-update-stjf9"]
Jan 28 17:11:57 crc kubenswrapper[4877]: I0128 17:11:57.347031 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11c0eeb0-1466-4faf-ae33-e74028802131" path="/var/lib/kubelet/pods/11c0eeb0-1466-4faf-ae33-e74028802131/volumes"
Jan 28 17:11:57 crc kubenswrapper[4877]: I0128 17:11:57.348020 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19c75c24-13dd-439a-9245-c02f9e6d8ec7" path="/var/lib/kubelet/pods/19c75c24-13dd-439a-9245-c02f9e6d8ec7/volumes"
Jan 28 17:11:57 crc kubenswrapper[4877]: I0128 17:11:57.348780 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59e30121-a8ea-4d16-8b90-659a6158def9" path="/var/lib/kubelet/pods/59e30121-a8ea-4d16-8b90-659a6158def9/volumes"
Jan 28 17:11:57 crc kubenswrapper[4877]: I0128 17:11:57.349517 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="764294ed-9715-4d81-b7b6-50a4104630fd" path="/var/lib/kubelet/pods/764294ed-9715-4d81-b7b6-50a4104630fd/volumes"
Jan 28 17:11:57 crc kubenswrapper[4877]: I0128 17:11:57.350699 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7749a4b7-d909-4b9a-ae04-acfcf29f916c" path="/var/lib/kubelet/pods/7749a4b7-d909-4b9a-ae04-acfcf29f916c/volumes"
Jan 28 17:11:57 crc kubenswrapper[4877]: I0128 17:11:57.351309 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5" path="/var/lib/kubelet/pods/b93fae4a-bd56-4b8e-b1f3-06cf0375c1c5/volumes"
Jan 28 17:12:07 crc kubenswrapper[4877]: I0128 17:12:07.076807 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 17:12:07 crc kubenswrapper[4877]: I0128 17:12:07.077387 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 17:12:29 crc kubenswrapper[4877]: I0128 17:12:29.075858 4877 scope.go:117] "RemoveContainer" containerID="57e59cfe9b20569e5d58de104a96a90907bfa62460cbc2c52b0403ef9d58b8e5"
Jan 28 17:12:29 crc kubenswrapper[4877]: I0128 17:12:29.100927 4877 scope.go:117] "RemoveContainer" containerID="cdc5f8e73929f9f07db943b054d39c565bf0723db74a347c03ddb7c4048b43d3"
Jan 28 17:12:29 crc kubenswrapper[4877]: I0128 17:12:29.183311 4877 scope.go:117] "RemoveContainer" containerID="2c34628420e9aad56823dc99220a1a6b33d83ce51784eabcd61d4e4965c7cf31"
Jan 28 17:12:29 crc kubenswrapper[4877]: I0128 17:12:29.238813 4877 scope.go:117] "RemoveContainer" containerID="ff0be326d382c6cd9d4ca6cabaab5c8e0cbe07f44a3ad3d4463877f461c4b0dc"
Jan 28 17:12:29 crc kubenswrapper[4877]: I0128 17:12:29.302692 4877 scope.go:117] "RemoveContainer" containerID="eb81e2c8faaa1999538f948fe9fa6e7d0ba86c30a6c12af6a1d62326f86b11c9"
Jan 28 17:12:29 crc kubenswrapper[4877]: I0128 17:12:29.370282 4877 scope.go:117] "RemoveContainer" containerID="9d6ab7a208b419836251b13cfebbbb207c58e5cda6e8ceadb24190892c535403"
Jan 28 17:12:29 crc kubenswrapper[4877]: I0128 17:12:29.433915 4877 scope.go:117] "RemoveContainer" containerID="977237e45e52b07423b5f28e12b0b8956c1da080a861b7df00800244b5c454fb"
Jan 28 17:12:32 crc kubenswrapper[4877]: I0128 17:12:32.685538 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vlpgb"]
Jan 28 17:12:32 crc kubenswrapper[4877]: I0128 17:12:32.689384 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vlpgb"
Jan 28 17:12:32 crc kubenswrapper[4877]: I0128 17:12:32.697700 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vlpgb"]
Jan 28 17:12:32 crc kubenswrapper[4877]: I0128 17:12:32.747664 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-utilities\") pod \"certified-operators-vlpgb\" (UID: \"d07e39ae-214a-4bca-b2d0-3bd639caf0c4\") " pod="openshift-marketplace/certified-operators-vlpgb"
Jan 28 17:12:32 crc kubenswrapper[4877]: I0128 17:12:32.747974 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89w9p\" (UniqueName: \"kubernetes.io/projected/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-kube-api-access-89w9p\") pod \"certified-operators-vlpgb\" (UID: \"d07e39ae-214a-4bca-b2d0-3bd639caf0c4\") " pod="openshift-marketplace/certified-operators-vlpgb"
Jan 28 17:12:32 crc kubenswrapper[4877]: I0128 17:12:32.748654 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-catalog-content\") pod \"certified-operators-vlpgb\" (UID: \"d07e39ae-214a-4bca-b2d0-3bd639caf0c4\") " pod="openshift-marketplace/certified-operators-vlpgb"
Jan 28 17:12:32 crc kubenswrapper[4877]: I0128 17:12:32.850909 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-catalog-content\") pod \"certified-operators-vlpgb\" (UID: \"d07e39ae-214a-4bca-b2d0-3bd639caf0c4\") " pod="openshift-marketplace/certified-operators-vlpgb"
Jan 28 17:12:32 crc kubenswrapper[4877]: I0128 17:12:32.851043 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-utilities\") pod \"certified-operators-vlpgb\" (UID: \"d07e39ae-214a-4bca-b2d0-3bd639caf0c4\") " pod="openshift-marketplace/certified-operators-vlpgb"
Jan 28 17:12:32 crc kubenswrapper[4877]: I0128 17:12:32.851092 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89w9p\" (UniqueName: \"kubernetes.io/projected/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-kube-api-access-89w9p\") pod \"certified-operators-vlpgb\" (UID: \"d07e39ae-214a-4bca-b2d0-3bd639caf0c4\") " pod="openshift-marketplace/certified-operators-vlpgb"
Jan 28 17:12:32 crc kubenswrapper[4877]: I0128 17:12:32.851376 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-catalog-content\") pod \"certified-operators-vlpgb\" (UID: \"d07e39ae-214a-4bca-b2d0-3bd639caf0c4\") " pod="openshift-marketplace/certified-operators-vlpgb"
Jan 28 17:12:32 crc kubenswrapper[4877]: I0128 17:12:32.851539 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-utilities\") pod \"certified-operators-vlpgb\" (UID: \"d07e39ae-214a-4bca-b2d0-3bd639caf0c4\") " pod="openshift-marketplace/certified-operators-vlpgb"
Jan 28 17:12:32 crc kubenswrapper[4877]: I0128 17:12:32.871677 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89w9p\" (UniqueName: \"kubernetes.io/projected/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-kube-api-access-89w9p\") pod \"certified-operators-vlpgb\" (UID: \"d07e39ae-214a-4bca-b2d0-3bd639caf0c4\") " pod="openshift-marketplace/certified-operators-vlpgb"
Jan 28 17:12:33 crc kubenswrapper[4877]: I0128 17:12:33.022083 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vlpgb"
Jan 28 17:12:33 crc kubenswrapper[4877]: I0128 17:12:33.536182 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vlpgb"]
Jan 28 17:12:33 crc kubenswrapper[4877]: I0128 17:12:33.606250 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vlpgb" event={"ID":"d07e39ae-214a-4bca-b2d0-3bd639caf0c4","Type":"ContainerStarted","Data":"050153154daa62cac99eeb2ff507d6c343e53bdc00557437ea0ab320487f761d"}
Jan 28 17:12:34 crc kubenswrapper[4877]: I0128 17:12:34.620461 4877 generic.go:334] "Generic (PLEG): container finished" podID="d07e39ae-214a-4bca-b2d0-3bd639caf0c4" containerID="5a5cfd4fbd606f32da005aa5dd84fa2cc3494d3e03581c3d36f52a5554b3bb47" exitCode=0
Jan 28 17:12:34 crc kubenswrapper[4877]: I0128 17:12:34.620677 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vlpgb" event={"ID":"d07e39ae-214a-4bca-b2d0-3bd639caf0c4","Type":"ContainerDied","Data":"5a5cfd4fbd606f32da005aa5dd84fa2cc3494d3e03581c3d36f52a5554b3bb47"}
Jan 28 17:12:35 crc kubenswrapper[4877]: I0128 17:12:35.632402 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vlpgb" event={"ID":"d07e39ae-214a-4bca-b2d0-3bd639caf0c4","Type":"ContainerStarted","Data":"7c854cc84d59280233105223e60668399b0a0e8edbec21b5488b716bf5cdfdc2"}
Jan 28 17:12:37 crc kubenswrapper[4877]: I0128 17:12:37.076223 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 17:12:37 crc kubenswrapper[4877]: I0128 17:12:37.076810 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 17:12:37 crc kubenswrapper[4877]: I0128 17:12:37.076893 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm"
Jan 28 17:12:37 crc kubenswrapper[4877]: I0128 17:12:37.078451 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 17:12:37 crc kubenswrapper[4877]: I0128 17:12:37.078699 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
containerName="machine-config-daemon" containerID="cri-o://d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" gracePeriod=600 Jan 28 17:12:37 crc kubenswrapper[4877]: I0128 17:12:37.652425 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" exitCode=0 Jan 28 17:12:37 crc kubenswrapper[4877]: I0128 17:12:37.652761 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3"} Jan 28 17:12:37 crc kubenswrapper[4877]: I0128 17:12:37.652829 4877 scope.go:117] "RemoveContainer" containerID="c7f0577d72207bd89c21d8eb9092633da62ac51cdf714b1a1e5d82fcc0a22555" Jan 28 17:12:37 crc kubenswrapper[4877]: E0128 17:12:37.760791 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:12:38 crc kubenswrapper[4877]: I0128 17:12:38.666732 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:12:38 crc kubenswrapper[4877]: E0128 17:12:38.667266 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:12:38 crc kubenswrapper[4877]: I0128 17:12:38.698404 4877 generic.go:334] "Generic (PLEG): container finished" podID="d07e39ae-214a-4bca-b2d0-3bd639caf0c4" containerID="7c854cc84d59280233105223e60668399b0a0e8edbec21b5488b716bf5cdfdc2" exitCode=0 Jan 28 17:12:38 crc kubenswrapper[4877]: I0128 17:12:38.698466 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vlpgb" event={"ID":"d07e39ae-214a-4bca-b2d0-3bd639caf0c4","Type":"ContainerDied","Data":"7c854cc84d59280233105223e60668399b0a0e8edbec21b5488b716bf5cdfdc2"} Jan 28 17:12:40 crc kubenswrapper[4877]: I0128 17:12:40.722247 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vlpgb" event={"ID":"d07e39ae-214a-4bca-b2d0-3bd639caf0c4","Type":"ContainerStarted","Data":"a184bdbffbf08db6c77d24585d3bb60cf8e3a4661e40f57b3b32097544aaa7c2"} Jan 28 17:12:40 crc kubenswrapper[4877]: I0128 17:12:40.748201 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vlpgb" podStartSLOduration=3.8821452020000002 podStartE2EDuration="8.748179772s" podCreationTimestamp="2026-01-28 17:12:32 +0000 UTC" firstStartedPulling="2026-01-28 17:12:34.624602467 +0000 UTC m=+2258.182929355" lastFinishedPulling="2026-01-28 17:12:39.490637037 +0000 UTC m=+2263.048963925" observedRunningTime="2026-01-28 17:12:40.743171557 +0000 UTC m=+2264.301498465" 
watchObservedRunningTime="2026-01-28 17:12:40.748179772 +0000 UTC m=+2264.306506650" Jan 28 17:12:43 crc kubenswrapper[4877]: I0128 17:12:43.022412 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vlpgb" Jan 28 17:12:43 crc kubenswrapper[4877]: I0128 17:12:43.023880 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vlpgb" Jan 28 17:12:44 crc kubenswrapper[4877]: I0128 17:12:44.075975 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-vlpgb" podUID="d07e39ae-214a-4bca-b2d0-3bd639caf0c4" containerName="registry-server" probeResult="failure" output=< Jan 28 17:12:44 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:12:44 crc kubenswrapper[4877]: > Jan 28 17:12:51 crc kubenswrapper[4877]: I0128 17:12:51.330796 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:12:51 crc kubenswrapper[4877]: E0128 17:12:51.331598 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:12:53 crc kubenswrapper[4877]: I0128 17:12:53.070848 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vlpgb" Jan 28 17:12:53 crc kubenswrapper[4877]: I0128 17:12:53.139460 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vlpgb" Jan 28 17:12:53 crc kubenswrapper[4877]: I0128 17:12:53.310836 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vlpgb"] Jan 28 17:12:54 crc kubenswrapper[4877]: I0128 17:12:54.894279 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vlpgb" podUID="d07e39ae-214a-4bca-b2d0-3bd639caf0c4" containerName="registry-server" containerID="cri-o://a184bdbffbf08db6c77d24585d3bb60cf8e3a4661e40f57b3b32097544aaa7c2" gracePeriod=2 Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.424357 4877 util.go:48] "No ready sandbox for pod can be found. 
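
The pod_startup_latency_tracker entry above for certified-operators-vlpgb carries enough data to check how podStartSLOduration is derived: it is the end-to-end startup time minus the image-pull window (lastFinishedPulling minus firstStartedPulling). That E2E runs from podCreationTimestamp to watchObservedRunningTime is inferred from the arithmetic, not stated in the log. A sketch verifying it with the logged timestamps (parse errors elided for brevity):

```go
// Reproduces the SLO-duration arithmetic from the log entry above:
//   SLO = E2E - (lastFinishedPulling - firstStartedPulling)
// All timestamps are copied verbatim from the log.
package main

import (
	"fmt"
	"time"
)

func main() {
	layout := "2006-01-02 15:04:05.999999999 -0700 MST"
	created, _ := time.Parse(layout, "2026-01-28 17:12:32 +0000 UTC")
	running, _ := time.Parse(layout, "2026-01-28 17:12:40.748179772 +0000 UTC")
	pullStart, _ := time.Parse(layout, "2026-01-28 17:12:34.624602467 +0000 UTC")
	pullEnd, _ := time.Parse(layout, "2026-01-28 17:12:39.490637037 +0000 UTC")

	e2e := running.Sub(created)    // 8.748179772s, matches podStartE2EDuration
	pull := pullEnd.Sub(pullStart) // 4.86603457s spent pulling the image
	slo := e2e - pull              // 3.882145202s, matches podStartSLOduration
	fmt.Println(e2e, pull, slo)
}
```

So the SLO metric deliberately excludes image-pull time: of the ~8.7 s this catalog pod took to come up, ~4.9 s was registry traffic.
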
Need to start a new one" pod="openshift-marketplace/certified-operators-vlpgb" Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.525459 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89w9p\" (UniqueName: \"kubernetes.io/projected/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-kube-api-access-89w9p\") pod \"d07e39ae-214a-4bca-b2d0-3bd639caf0c4\" (UID: \"d07e39ae-214a-4bca-b2d0-3bd639caf0c4\") " Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.525623 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-catalog-content\") pod \"d07e39ae-214a-4bca-b2d0-3bd639caf0c4\" (UID: \"d07e39ae-214a-4bca-b2d0-3bd639caf0c4\") " Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.525889 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-utilities\") pod \"d07e39ae-214a-4bca-b2d0-3bd639caf0c4\" (UID: \"d07e39ae-214a-4bca-b2d0-3bd639caf0c4\") " Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.526799 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-utilities" (OuterVolumeSpecName: "utilities") pod "d07e39ae-214a-4bca-b2d0-3bd639caf0c4" (UID: "d07e39ae-214a-4bca-b2d0-3bd639caf0c4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.536758 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-kube-api-access-89w9p" (OuterVolumeSpecName: "kube-api-access-89w9p") pod "d07e39ae-214a-4bca-b2d0-3bd639caf0c4" (UID: "d07e39ae-214a-4bca-b2d0-3bd639caf0c4"). InnerVolumeSpecName "kube-api-access-89w9p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.574215 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d07e39ae-214a-4bca-b2d0-3bd639caf0c4" (UID: "d07e39ae-214a-4bca-b2d0-3bd639caf0c4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.628700 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.628733 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.628744 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89w9p\" (UniqueName: \"kubernetes.io/projected/d07e39ae-214a-4bca-b2d0-3bd639caf0c4-kube-api-access-89w9p\") on node \"crc\" DevicePath \"\"" Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.906664 4877 generic.go:334] "Generic (PLEG): container finished" podID="d07e39ae-214a-4bca-b2d0-3bd639caf0c4" containerID="a184bdbffbf08db6c77d24585d3bb60cf8e3a4661e40f57b3b32097544aaa7c2" exitCode=0 Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.906774 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vlpgb" event={"ID":"d07e39ae-214a-4bca-b2d0-3bd639caf0c4","Type":"ContainerDied","Data":"a184bdbffbf08db6c77d24585d3bb60cf8e3a4661e40f57b3b32097544aaa7c2"} Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.907044 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vlpgb" event={"ID":"d07e39ae-214a-4bca-b2d0-3bd639caf0c4","Type":"ContainerDied","Data":"050153154daa62cac99eeb2ff507d6c343e53bdc00557437ea0ab320487f761d"} Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.907074 4877 scope.go:117] "RemoveContainer" containerID="a184bdbffbf08db6c77d24585d3bb60cf8e3a4661e40f57b3b32097544aaa7c2" Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.906886 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vlpgb" Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.927552 4877 scope.go:117] "RemoveContainer" containerID="7c854cc84d59280233105223e60668399b0a0e8edbec21b5488b716bf5cdfdc2" Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.951531 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vlpgb"] Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.955521 4877 scope.go:117] "RemoveContainer" containerID="5a5cfd4fbd606f32da005aa5dd84fa2cc3494d3e03581c3d36f52a5554b3bb47" Jan 28 17:12:55 crc kubenswrapper[4877]: I0128 17:12:55.963809 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vlpgb"] Jan 28 17:12:56 crc kubenswrapper[4877]: I0128 17:12:56.015170 4877 scope.go:117] "RemoveContainer" containerID="a184bdbffbf08db6c77d24585d3bb60cf8e3a4661e40f57b3b32097544aaa7c2" Jan 28 17:12:56 crc kubenswrapper[4877]: E0128 17:12:56.015851 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a184bdbffbf08db6c77d24585d3bb60cf8e3a4661e40f57b3b32097544aaa7c2\": container with ID starting with a184bdbffbf08db6c77d24585d3bb60cf8e3a4661e40f57b3b32097544aaa7c2 not found: ID does not exist" containerID="a184bdbffbf08db6c77d24585d3bb60cf8e3a4661e40f57b3b32097544aaa7c2" Jan 28 17:12:56 crc kubenswrapper[4877]: I0128 17:12:56.015886 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a184bdbffbf08db6c77d24585d3bb60cf8e3a4661e40f57b3b32097544aaa7c2"} err="failed to get container status \"a184bdbffbf08db6c77d24585d3bb60cf8e3a4661e40f57b3b32097544aaa7c2\": rpc error: code = NotFound desc = could not find container \"a184bdbffbf08db6c77d24585d3bb60cf8e3a4661e40f57b3b32097544aaa7c2\": container with ID starting with a184bdbffbf08db6c77d24585d3bb60cf8e3a4661e40f57b3b32097544aaa7c2 not found: ID does not exist" Jan 28 17:12:56 crc kubenswrapper[4877]: I0128 17:12:56.015908 4877 scope.go:117] "RemoveContainer" containerID="7c854cc84d59280233105223e60668399b0a0e8edbec21b5488b716bf5cdfdc2" Jan 28 17:12:56 crc kubenswrapper[4877]: E0128 17:12:56.019217 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c854cc84d59280233105223e60668399b0a0e8edbec21b5488b716bf5cdfdc2\": container with ID starting with 7c854cc84d59280233105223e60668399b0a0e8edbec21b5488b716bf5cdfdc2 not found: ID does not exist" containerID="7c854cc84d59280233105223e60668399b0a0e8edbec21b5488b716bf5cdfdc2" Jan 28 17:12:56 crc kubenswrapper[4877]: I0128 17:12:56.019280 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c854cc84d59280233105223e60668399b0a0e8edbec21b5488b716bf5cdfdc2"} err="failed to get container status \"7c854cc84d59280233105223e60668399b0a0e8edbec21b5488b716bf5cdfdc2\": rpc error: code = NotFound desc = could not find container \"7c854cc84d59280233105223e60668399b0a0e8edbec21b5488b716bf5cdfdc2\": container with ID starting with 7c854cc84d59280233105223e60668399b0a0e8edbec21b5488b716bf5cdfdc2 not found: ID does not exist" Jan 28 17:12:56 crc kubenswrapper[4877]: I0128 17:12:56.019321 4877 scope.go:117] "RemoveContainer" containerID="5a5cfd4fbd606f32da005aa5dd84fa2cc3494d3e03581c3d36f52a5554b3bb47" Jan 28 17:12:56 crc kubenswrapper[4877]: E0128 17:12:56.019792 4877 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"5a5cfd4fbd606f32da005aa5dd84fa2cc3494d3e03581c3d36f52a5554b3bb47\": container with ID starting with 5a5cfd4fbd606f32da005aa5dd84fa2cc3494d3e03581c3d36f52a5554b3bb47 not found: ID does not exist" containerID="5a5cfd4fbd606f32da005aa5dd84fa2cc3494d3e03581c3d36f52a5554b3bb47" Jan 28 17:12:56 crc kubenswrapper[4877]: I0128 17:12:56.019877 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a5cfd4fbd606f32da005aa5dd84fa2cc3494d3e03581c3d36f52a5554b3bb47"} err="failed to get container status \"5a5cfd4fbd606f32da005aa5dd84fa2cc3494d3e03581c3d36f52a5554b3bb47\": rpc error: code = NotFound desc = could not find container \"5a5cfd4fbd606f32da005aa5dd84fa2cc3494d3e03581c3d36f52a5554b3bb47\": container with ID starting with 5a5cfd4fbd606f32da005aa5dd84fa2cc3494d3e03581c3d36f52a5554b3bb47 not found: ID does not exist" Jan 28 17:12:57 crc kubenswrapper[4877]: I0128 17:12:57.351288 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d07e39ae-214a-4bca-b2d0-3bd639caf0c4" path="/var/lib/kubelet/pods/d07e39ae-214a-4bca-b2d0-3bd639caf0c4/volumes" Jan 28 17:13:01 crc kubenswrapper[4877]: I0128 17:13:01.045069 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-j2jrv"] Jan 28 17:13:01 crc kubenswrapper[4877]: I0128 17:13:01.057270 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-j2jrv"] Jan 28 17:13:01 crc kubenswrapper[4877]: I0128 17:13:01.342683 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e828bbc2-14a9-4ffd-8867-065b294666f4" path="/var/lib/kubelet/pods/e828bbc2-14a9-4ffd-8867-065b294666f4/volumes" Jan 28 17:13:03 crc kubenswrapper[4877]: I0128 17:13:03.038035 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-1655-account-create-update-vkpnq"] Jan 28 17:13:03 crc kubenswrapper[4877]: I0128 17:13:03.052920 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-1655-account-create-update-vkpnq"] Jan 28 17:13:03 crc kubenswrapper[4877]: I0128 17:13:03.342908 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2be3e552-8300-424e-90d5-93a278509c6b" path="/var/lib/kubelet/pods/2be3e552-8300-424e-90d5-93a278509c6b/volumes" Jan 28 17:13:05 crc kubenswrapper[4877]: I0128 17:13:05.331787 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:13:05 crc kubenswrapper[4877]: E0128 17:13:05.332492 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:13:17 crc kubenswrapper[4877]: I0128 17:13:17.330407 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:13:17 crc kubenswrapper[4877]: E0128 17:13:17.331168 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:13:29 crc kubenswrapper[4877]: I0128 17:13:29.600415 4877 scope.go:117] "RemoveContainer" containerID="2b676a191a14c327deca49069f39db8a9f67aa7ccdd4c2f4f18e7ccf7daba4ce" Jan 28 17:13:29 crc kubenswrapper[4877]: I0128 17:13:29.631217 4877 scope.go:117] "RemoveContainer" containerID="6cd98f618fe3677e799bab882f16f43fe0eec5ae61f33694d156a073118aca58" Jan 28 17:13:30 crc kubenswrapper[4877]: I0128 17:13:30.331053 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:13:30 crc kubenswrapper[4877]: E0128 17:13:30.331375 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:13:38 crc kubenswrapper[4877]: I0128 17:13:38.058414 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-j4pm4"] Jan 28 17:13:38 crc kubenswrapper[4877]: I0128 17:13:38.069718 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-j4pm4"] Jan 28 17:13:39 crc kubenswrapper[4877]: I0128 17:13:39.364148 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0456757-a0e3-42a7-900f-422828fe9836" path="/var/lib/kubelet/pods/d0456757-a0e3-42a7-900f-422828fe9836/volumes" Jan 28 17:13:41 crc kubenswrapper[4877]: I0128 17:13:41.333764 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:13:41 crc kubenswrapper[4877]: E0128 17:13:41.334153 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:13:44 crc kubenswrapper[4877]: I0128 17:13:44.456551 4877 generic.go:334] "Generic (PLEG): container finished" podID="4e93507a-6405-4506-8ff9-c832187ec6f2" containerID="a93a633ae344e9ee311d95d7840a62f40f83d80bcce883fa1e63dd85fd7db232" exitCode=0 Jan 28 17:13:44 crc kubenswrapper[4877]: I0128 17:13:44.456607 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" event={"ID":"4e93507a-6405-4506-8ff9-c832187ec6f2","Type":"ContainerDied","Data":"a93a633ae344e9ee311d95d7840a62f40f83d80bcce883fa1e63dd85fd7db232"} Jan 28 17:13:45 crc kubenswrapper[4877]: I0128 17:13:45.991627 4877 util.go:48] "No ready sandbox for pod can be found. 
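
The 17:12:56 error/info pairs above look alarming but are benign: the kubelet asks the runtime for the status of containers it has just deleted, gets a gRPC NotFound back from CRI-O, and logs the failure while still converging, since the container being gone is the desired state. A sketch of that idempotent-delete pattern using the standard gRPC status package; removeContainer is a hypothetical stand-in, not the kubelet's real CRI client:

```go
// Idempotent container removal: treat NotFound from the runtime as
// success, because the goal ("container does not exist") already holds.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer is a placeholder for a CRI runtime call; here it always
// reports the container as already gone, mimicking the log above.
func removeContainer(id string) error {
	return status.Errorf(codes.NotFound, "could not find container %q", id)
}

func main() {
	err := removeContainer("a184bdbffbf0")
	if status.Code(err) == codes.NotFound {
		fmt.Println("already removed, treating as success:", err)
		return
	}
	if err != nil {
		fmt.Println("removal failed:", err)
	}
}
```
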
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.101400 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n28tr\" (UniqueName: \"kubernetes.io/projected/4e93507a-6405-4506-8ff9-c832187ec6f2-kube-api-access-n28tr\") pod \"4e93507a-6405-4506-8ff9-c832187ec6f2\" (UID: \"4e93507a-6405-4506-8ff9-c832187ec6f2\") " Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.101663 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e93507a-6405-4506-8ff9-c832187ec6f2-inventory\") pod \"4e93507a-6405-4506-8ff9-c832187ec6f2\" (UID: \"4e93507a-6405-4506-8ff9-c832187ec6f2\") " Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.101696 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4e93507a-6405-4506-8ff9-c832187ec6f2-ssh-key-openstack-edpm-ipam\") pod \"4e93507a-6405-4506-8ff9-c832187ec6f2\" (UID: \"4e93507a-6405-4506-8ff9-c832187ec6f2\") " Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.108450 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e93507a-6405-4506-8ff9-c832187ec6f2-kube-api-access-n28tr" (OuterVolumeSpecName: "kube-api-access-n28tr") pod "4e93507a-6405-4506-8ff9-c832187ec6f2" (UID: "4e93507a-6405-4506-8ff9-c832187ec6f2"). InnerVolumeSpecName "kube-api-access-n28tr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.137893 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e93507a-6405-4506-8ff9-c832187ec6f2-inventory" (OuterVolumeSpecName: "inventory") pod "4e93507a-6405-4506-8ff9-c832187ec6f2" (UID: "4e93507a-6405-4506-8ff9-c832187ec6f2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.142076 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e93507a-6405-4506-8ff9-c832187ec6f2-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "4e93507a-6405-4506-8ff9-c832187ec6f2" (UID: "4e93507a-6405-4506-8ff9-c832187ec6f2"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.205183 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4e93507a-6405-4506-8ff9-c832187ec6f2-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.205244 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4e93507a-6405-4506-8ff9-c832187ec6f2-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.205262 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n28tr\" (UniqueName: \"kubernetes.io/projected/4e93507a-6405-4506-8ff9-c832187ec6f2-kube-api-access-n28tr\") on node \"crc\" DevicePath \"\"" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.476085 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" event={"ID":"4e93507a-6405-4506-8ff9-c832187ec6f2","Type":"ContainerDied","Data":"39502d8b1e911e263f29a7b62b3af83a12114a369bf1912ce22101b43b34190f"} Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.476127 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39502d8b1e911e263f29a7b62b3af83a12114a369bf1912ce22101b43b34190f" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.476162 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-phn2v" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.559948 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5"] Jan 28 17:13:46 crc kubenswrapper[4877]: E0128 17:13:46.560497 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d07e39ae-214a-4bca-b2d0-3bd639caf0c4" containerName="registry-server" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.560517 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d07e39ae-214a-4bca-b2d0-3bd639caf0c4" containerName="registry-server" Jan 28 17:13:46 crc kubenswrapper[4877]: E0128 17:13:46.560536 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d07e39ae-214a-4bca-b2d0-3bd639caf0c4" containerName="extract-content" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.560544 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d07e39ae-214a-4bca-b2d0-3bd639caf0c4" containerName="extract-content" Jan 28 17:13:46 crc kubenswrapper[4877]: E0128 17:13:46.560572 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e93507a-6405-4506-8ff9-c832187ec6f2" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.560584 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e93507a-6405-4506-8ff9-c832187ec6f2" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 28 17:13:46 crc kubenswrapper[4877]: E0128 17:13:46.560603 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d07e39ae-214a-4bca-b2d0-3bd639caf0c4" containerName="extract-utilities" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.560609 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d07e39ae-214a-4bca-b2d0-3bd639caf0c4" containerName="extract-utilities" Jan 28 17:13:46 crc kubenswrapper[4877]: 
I0128 17:13:46.560998 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e93507a-6405-4506-8ff9-c832187ec6f2" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.561015 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="d07e39ae-214a-4bca-b2d0-3bd639caf0c4" containerName="registry-server" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.562021 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.564974 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.565239 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.565415 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.566715 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.581393 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5"] Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.717316 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5\" (UID: \"0c119496-6dcd-4825-b6ab-b1c11a41b9c9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.717653 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qgk2b\" (UniqueName: \"kubernetes.io/projected/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-kube-api-access-qgk2b\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5\" (UID: \"0c119496-6dcd-4825-b6ab-b1c11a41b9c9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.717829 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5\" (UID: \"0c119496-6dcd-4825-b6ab-b1c11a41b9c9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.820529 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5\" (UID: \"0c119496-6dcd-4825-b6ab-b1c11a41b9c9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.820649 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5\" (UID: \"0c119496-6dcd-4825-b6ab-b1c11a41b9c9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.820727 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qgk2b\" (UniqueName: \"kubernetes.io/projected/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-kube-api-access-qgk2b\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5\" (UID: \"0c119496-6dcd-4825-b6ab-b1c11a41b9c9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.824661 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5\" (UID: \"0c119496-6dcd-4825-b6ab-b1c11a41b9c9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.825324 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5\" (UID: \"0c119496-6dcd-4825-b6ab-b1c11a41b9c9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.839647 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qgk2b\" (UniqueName: \"kubernetes.io/projected/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-kube-api-access-qgk2b\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5\" (UID: \"0c119496-6dcd-4825-b6ab-b1c11a41b9c9\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" Jan 28 17:13:46 crc kubenswrapper[4877]: I0128 17:13:46.880080 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" Jan 28 17:13:47 crc kubenswrapper[4877]: I0128 17:13:47.503542 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5"] Jan 28 17:13:47 crc kubenswrapper[4877]: I0128 17:13:47.510434 4877 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 17:13:48 crc kubenswrapper[4877]: I0128 17:13:48.498795 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" event={"ID":"0c119496-6dcd-4825-b6ab-b1c11a41b9c9","Type":"ContainerStarted","Data":"c97b271c01fd6c682fbab18d070896c33ea1538715a9574e9a11ce1b7c021513"} Jan 28 17:13:48 crc kubenswrapper[4877]: I0128 17:13:48.499544 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" event={"ID":"0c119496-6dcd-4825-b6ab-b1c11a41b9c9","Type":"ContainerStarted","Data":"d747721559b26833c4fc9c6723093108390ec1e728c215fd6513cae9725545bb"} Jan 28 17:13:48 crc kubenswrapper[4877]: I0128 17:13:48.519410 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" podStartSLOduration=2.093264548 podStartE2EDuration="2.519391936s" podCreationTimestamp="2026-01-28 17:13:46 +0000 UTC" firstStartedPulling="2026-01-28 17:13:47.510178505 +0000 UTC m=+2331.068505393" lastFinishedPulling="2026-01-28 17:13:47.936305883 +0000 UTC m=+2331.494632781" observedRunningTime="2026-01-28 17:13:48.513137357 +0000 UTC m=+2332.071464245" watchObservedRunningTime="2026-01-28 17:13:48.519391936 +0000 UTC m=+2332.077718824" Jan 28 17:13:56 crc kubenswrapper[4877]: I0128 17:13:56.330963 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:13:56 crc kubenswrapper[4877]: E0128 17:13:56.331824 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:13:59 crc kubenswrapper[4877]: I0128 17:13:59.042797 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-lpr6d"] Jan 28 17:13:59 crc kubenswrapper[4877]: I0128 17:13:59.061676 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-lpr6d"] Jan 28 17:13:59 crc kubenswrapper[4877]: I0128 17:13:59.346305 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5b7f132-af69-43a8-8771-c039c8039a35" path="/var/lib/kubelet/pods/e5b7f132-af69-43a8-8771-c039c8039a35/volumes" Jan 28 17:14:00 crc kubenswrapper[4877]: I0128 17:14:00.030843 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-f5bgl"] Jan 28 17:14:00 crc kubenswrapper[4877]: I0128 17:14:00.043152 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-f5bgl"] Jan 28 17:14:01 crc kubenswrapper[4877]: I0128 17:14:01.346064 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="e4954890-1a90-4904-a7d8-f286d1b56745" path="/var/lib/kubelet/pods/e4954890-1a90-4904-a7d8-f286d1b56745/volumes" Jan 28 17:14:10 crc kubenswrapper[4877]: I0128 17:14:10.331251 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:14:10 crc kubenswrapper[4877]: E0128 17:14:10.332061 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:14:22 crc kubenswrapper[4877]: I0128 17:14:22.331124 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:14:22 crc kubenswrapper[4877]: E0128 17:14:22.331793 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:14:29 crc kubenswrapper[4877]: I0128 17:14:29.746887 4877 scope.go:117] "RemoveContainer" containerID="97f20036790abdca2e5eceae7ba75fd06efad3ef727f8220c097ad21982b0b86" Jan 28 17:14:29 crc kubenswrapper[4877]: I0128 17:14:29.784125 4877 scope.go:117] "RemoveContainer" containerID="05cf205cb7271560cfdd0cac3ffd18218092b8634d3059295d86db5e3ec969aa" Jan 28 17:14:29 crc kubenswrapper[4877]: I0128 17:14:29.864440 4877 scope.go:117] "RemoveContainer" containerID="280d1e05976bf316ebf7fa080e91b03cd06512ba323bcc8e9eb0e6c4179b8f80" Jan 28 17:14:37 crc kubenswrapper[4877]: I0128 17:14:37.340395 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:14:37 crc kubenswrapper[4877]: E0128 17:14:37.341539 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:14:45 crc kubenswrapper[4877]: I0128 17:14:45.044427 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-8jrbg"] Jan 28 17:14:45 crc kubenswrapper[4877]: I0128 17:14:45.059379 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-8jrbg"] Jan 28 17:14:45 crc kubenswrapper[4877]: I0128 17:14:45.352774 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8e73c54-63fa-450f-8d23-9d566575569e" path="/var/lib/kubelet/pods/b8e73c54-63fa-450f-8d23-9d566575569e/volumes" Jan 28 17:14:49 crc kubenswrapper[4877]: I0128 17:14:49.332966 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:14:49 crc kubenswrapper[4877]: E0128 17:14:49.334162 4877 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:14:58 crc kubenswrapper[4877]: I0128 17:14:58.264674 4877 generic.go:334] "Generic (PLEG): container finished" podID="0c119496-6dcd-4825-b6ab-b1c11a41b9c9" containerID="c97b271c01fd6c682fbab18d070896c33ea1538715a9574e9a11ce1b7c021513" exitCode=0 Jan 28 17:14:58 crc kubenswrapper[4877]: I0128 17:14:58.264788 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" event={"ID":"0c119496-6dcd-4825-b6ab-b1c11a41b9c9","Type":"ContainerDied","Data":"c97b271c01fd6c682fbab18d070896c33ea1538715a9574e9a11ce1b7c021513"} Jan 28 17:14:59 crc kubenswrapper[4877]: I0128 17:14:59.732948 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" Jan 28 17:14:59 crc kubenswrapper[4877]: I0128 17:14:59.860085 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-inventory\") pod \"0c119496-6dcd-4825-b6ab-b1c11a41b9c9\" (UID: \"0c119496-6dcd-4825-b6ab-b1c11a41b9c9\") " Jan 28 17:14:59 crc kubenswrapper[4877]: I0128 17:14:59.860423 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qgk2b\" (UniqueName: \"kubernetes.io/projected/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-kube-api-access-qgk2b\") pod \"0c119496-6dcd-4825-b6ab-b1c11a41b9c9\" (UID: \"0c119496-6dcd-4825-b6ab-b1c11a41b9c9\") " Jan 28 17:14:59 crc kubenswrapper[4877]: I0128 17:14:59.861143 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-ssh-key-openstack-edpm-ipam\") pod \"0c119496-6dcd-4825-b6ab-b1c11a41b9c9\" (UID: \"0c119496-6dcd-4825-b6ab-b1c11a41b9c9\") " Jan 28 17:14:59 crc kubenswrapper[4877]: I0128 17:14:59.868843 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-kube-api-access-qgk2b" (OuterVolumeSpecName: "kube-api-access-qgk2b") pod "0c119496-6dcd-4825-b6ab-b1c11a41b9c9" (UID: "0c119496-6dcd-4825-b6ab-b1c11a41b9c9"). InnerVolumeSpecName "kube-api-access-qgk2b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:14:59 crc kubenswrapper[4877]: I0128 17:14:59.901236 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "0c119496-6dcd-4825-b6ab-b1c11a41b9c9" (UID: "0c119496-6dcd-4825-b6ab-b1c11a41b9c9"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:14:59 crc kubenswrapper[4877]: I0128 17:14:59.910216 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-inventory" (OuterVolumeSpecName: "inventory") pod "0c119496-6dcd-4825-b6ab-b1c11a41b9c9" (UID: "0c119496-6dcd-4825-b6ab-b1c11a41b9c9"). 
InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:14:59 crc kubenswrapper[4877]: I0128 17:14:59.964828 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:14:59 crc kubenswrapper[4877]: I0128 17:14:59.964869 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qgk2b\" (UniqueName: \"kubernetes.io/projected/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-kube-api-access-qgk2b\") on node \"crc\" DevicePath \"\"" Jan 28 17:14:59 crc kubenswrapper[4877]: I0128 17:14:59.964878 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0c119496-6dcd-4825-b6ab-b1c11a41b9c9-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.191045 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs"] Jan 28 17:15:00 crc kubenswrapper[4877]: E0128 17:15:00.195344 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c119496-6dcd-4825-b6ab-b1c11a41b9c9" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.195393 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c119496-6dcd-4825-b6ab-b1c11a41b9c9" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.195783 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c119496-6dcd-4825-b6ab-b1c11a41b9c9" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.197094 4877 util.go:30] "No sandbox for pod can be found. 
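
Every "back-off 5m0s restarting failed container" entry above and below is the kubelet refusing to restart machine-config-daemon before its crash back-off window elapses. Kubernetes' documented schedule starts at 10 s and doubles per crash up to a 5 m cap, which this container has reached; the sketch below reproduces that schedule. The entries recurring every 10-15 seconds (17:14:10, 17:14:22, 17:14:37, 17:14:49, ...) are sync-loop retries bouncing off the same window, not resets of the back-off timer:

```go
// Reproduces the kubelet's crash-loop back-off schedule: 10s, doubling
// per restart, clamped at 5m ("back-off 5m0s" in the log means the cap
// has been reached).
package main

import (
	"fmt"
	"time"
)

func main() {
	const max = 5 * time.Minute
	backoff := 10 * time.Second
	for i := 1; ; i++ {
		fmt.Printf("restart %d: wait %v\n", i, backoff)
		if backoff >= max {
			fmt.Println("capped at", max)
			break
		}
		backoff *= 2
		if backoff > max {
			backoff = max
		}
	}
}
```
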
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.202261 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.202691 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.217808 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs"] Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.272443 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5z5z4\" (UniqueName: \"kubernetes.io/projected/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-kube-api-access-5z5z4\") pod \"collect-profiles-29493675-fkxbs\" (UID: \"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.272540 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-secret-volume\") pod \"collect-profiles-29493675-fkxbs\" (UID: \"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.273642 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-config-volume\") pod \"collect-profiles-29493675-fkxbs\" (UID: \"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.295113 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" event={"ID":"0c119496-6dcd-4825-b6ab-b1c11a41b9c9","Type":"ContainerDied","Data":"d747721559b26833c4fc9c6723093108390ec1e728c215fd6513cae9725545bb"} Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.295158 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d747721559b26833c4fc9c6723093108390ec1e728c215fd6513cae9725545bb" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.295217 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hc8q5" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.377736 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-config-volume\") pod \"collect-profiles-29493675-fkxbs\" (UID: \"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.377919 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5z5z4\" (UniqueName: \"kubernetes.io/projected/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-kube-api-access-5z5z4\") pod \"collect-profiles-29493675-fkxbs\" (UID: \"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.378394 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-secret-volume\") pod \"collect-profiles-29493675-fkxbs\" (UID: \"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.379140 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-config-volume\") pod \"collect-profiles-29493675-fkxbs\" (UID: \"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.385281 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-secret-volume\") pod \"collect-profiles-29493675-fkxbs\" (UID: \"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.402592 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5z5z4\" (UniqueName: \"kubernetes.io/projected/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-kube-api-access-5z5z4\") pod \"collect-profiles-29493675-fkxbs\" (UID: \"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.432100 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz"] Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.434862 4877 util.go:30] "No sandbox for pod can be found. 
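
The kube-api-access-* volumes mounted above (for collect-profiles-29493675-fkxbs here, and for the validate-network pod just below) are the projected service-account volumes Kubernetes injects into pods: an API token, the cluster CA bundle, and the pod's namespace, all at a fixed path. A sketch of how an in-pod client reads them; run outside a pod it just reports the files as missing:

```go
// Reads the projected service-account files that back a kube-api-access-*
// mount. The directory path is the standard in-pod location.
package main

import (
	"fmt"
	"os"
)

const saDir = "/var/run/secrets/kubernetes.io/serviceaccount"

func main() {
	for _, name := range []string{"token", "ca.crt", "namespace"} {
		data, err := os.ReadFile(saDir + "/" + name)
		if err != nil {
			fmt.Printf("%s: not available (%v), expected outside a pod\n", name, err)
			continue
		}
		fmt.Printf("%s: %d bytes\n", name, len(data))
	}
}
```
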
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.438215 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.438494 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.438674 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.438789 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.466748 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz"] Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.536823 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.584914 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mktj\" (UniqueName: \"kubernetes.io/projected/d2007094-f320-4e43-903f-20a3b7705f1a-kube-api-access-9mktj\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz\" (UID: \"d2007094-f320-4e43-903f-20a3b7705f1a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.585010 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d2007094-f320-4e43-903f-20a3b7705f1a-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz\" (UID: \"d2007094-f320-4e43-903f-20a3b7705f1a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.585057 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2007094-f320-4e43-903f-20a3b7705f1a-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz\" (UID: \"d2007094-f320-4e43-903f-20a3b7705f1a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.687233 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mktj\" (UniqueName: \"kubernetes.io/projected/d2007094-f320-4e43-903f-20a3b7705f1a-kube-api-access-9mktj\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz\" (UID: \"d2007094-f320-4e43-903f-20a3b7705f1a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.687345 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d2007094-f320-4e43-903f-20a3b7705f1a-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz\" (UID: \"d2007094-f320-4e43-903f-20a3b7705f1a\") " 
pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.687398 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2007094-f320-4e43-903f-20a3b7705f1a-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz\" (UID: \"d2007094-f320-4e43-903f-20a3b7705f1a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.696377 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d2007094-f320-4e43-903f-20a3b7705f1a-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz\" (UID: \"d2007094-f320-4e43-903f-20a3b7705f1a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.698401 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2007094-f320-4e43-903f-20a3b7705f1a-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz\" (UID: \"d2007094-f320-4e43-903f-20a3b7705f1a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.729043 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mktj\" (UniqueName: \"kubernetes.io/projected/d2007094-f320-4e43-903f-20a3b7705f1a-kube-api-access-9mktj\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz\" (UID: \"d2007094-f320-4e43-903f-20a3b7705f1a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" Jan 28 17:15:00 crc kubenswrapper[4877]: I0128 17:15:00.791124 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" Jan 28 17:15:01 crc kubenswrapper[4877]: I0128 17:15:01.172859 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs"] Jan 28 17:15:01 crc kubenswrapper[4877]: W0128 17:15:01.176793 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57b5ce29_2759_4dc4_b3a1_3477aa0c11a4.slice/crio-4edc8a3fb28a8ff4c8913c93ecc7c86667f9bddb5877457155ee70a4fcb33080 WatchSource:0}: Error finding container 4edc8a3fb28a8ff4c8913c93ecc7c86667f9bddb5877457155ee70a4fcb33080: Status 404 returned error can't find the container with id 4edc8a3fb28a8ff4c8913c93ecc7c86667f9bddb5877457155ee70a4fcb33080 Jan 28 17:15:01 crc kubenswrapper[4877]: I0128 17:15:01.307083 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs" event={"ID":"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4","Type":"ContainerStarted","Data":"4edc8a3fb28a8ff4c8913c93ecc7c86667f9bddb5877457155ee70a4fcb33080"} Jan 28 17:15:01 crc kubenswrapper[4877]: I0128 17:15:01.530583 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz"] Jan 28 17:15:01 crc kubenswrapper[4877]: W0128 17:15:01.534740 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd2007094_f320_4e43_903f_20a3b7705f1a.slice/crio-8a1ac1abba785daf6d09e4532175a9c4bdca2be5de60260dc4917f9e42ab03fb WatchSource:0}: Error finding container 8a1ac1abba785daf6d09e4532175a9c4bdca2be5de60260dc4917f9e42ab03fb: Status 404 returned error can't find the container with id 8a1ac1abba785daf6d09e4532175a9c4bdca2be5de60260dc4917f9e42ab03fb Jan 28 17:15:02 crc kubenswrapper[4877]: I0128 17:15:02.324164 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" event={"ID":"d2007094-f320-4e43-903f-20a3b7705f1a","Type":"ContainerStarted","Data":"8a1ac1abba785daf6d09e4532175a9c4bdca2be5de60260dc4917f9e42ab03fb"} Jan 28 17:15:02 crc kubenswrapper[4877]: I0128 17:15:02.326517 4877 generic.go:334] "Generic (PLEG): container finished" podID="57b5ce29-2759-4dc4-b3a1-3477aa0c11a4" containerID="1e1d54838a5a193d176591ac2145c35e1e2d1897044d531b71a10dcb28dceb16" exitCode=0 Jan 28 17:15:02 crc kubenswrapper[4877]: I0128 17:15:02.326556 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs" event={"ID":"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4","Type":"ContainerDied","Data":"1e1d54838a5a193d176591ac2145c35e1e2d1897044d531b71a10dcb28dceb16"} Jan 28 17:15:03 crc kubenswrapper[4877]: I0128 17:15:03.346536 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" event={"ID":"d2007094-f320-4e43-903f-20a3b7705f1a","Type":"ContainerStarted","Data":"adc35db7d3305977e95ba35915dac2a4b80df27d0ac795fe6ff22f03bbc7363b"} Jan 28 17:15:03 crc kubenswrapper[4877]: I0128 17:15:03.368995 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" podStartSLOduration=2.897752598 podStartE2EDuration="3.368977849s" podCreationTimestamp="2026-01-28 17:15:00 +0000 UTC" 
firstStartedPulling="2026-01-28 17:15:01.53822686 +0000 UTC m=+2405.096553738" lastFinishedPulling="2026-01-28 17:15:02.009452101 +0000 UTC m=+2405.567778989" observedRunningTime="2026-01-28 17:15:03.367201252 +0000 UTC m=+2406.925528160" watchObservedRunningTime="2026-01-28 17:15:03.368977849 +0000 UTC m=+2406.927304737" Jan 28 17:15:03 crc kubenswrapper[4877]: I0128 17:15:03.854861 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs" Jan 28 17:15:03 crc kubenswrapper[4877]: I0128 17:15:03.913354 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5z5z4\" (UniqueName: \"kubernetes.io/projected/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-kube-api-access-5z5z4\") pod \"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4\" (UID: \"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4\") " Jan 28 17:15:03 crc kubenswrapper[4877]: I0128 17:15:03.913440 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-config-volume\") pod \"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4\" (UID: \"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4\") " Jan 28 17:15:03 crc kubenswrapper[4877]: I0128 17:15:03.913560 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-secret-volume\") pod \"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4\" (UID: \"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4\") " Jan 28 17:15:03 crc kubenswrapper[4877]: I0128 17:15:03.916450 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-config-volume" (OuterVolumeSpecName: "config-volume") pod "57b5ce29-2759-4dc4-b3a1-3477aa0c11a4" (UID: "57b5ce29-2759-4dc4-b3a1-3477aa0c11a4"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:15:03 crc kubenswrapper[4877]: I0128 17:15:03.922625 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "57b5ce29-2759-4dc4-b3a1-3477aa0c11a4" (UID: "57b5ce29-2759-4dc4-b3a1-3477aa0c11a4"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:15:03 crc kubenswrapper[4877]: I0128 17:15:03.922714 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-kube-api-access-5z5z4" (OuterVolumeSpecName: "kube-api-access-5z5z4") pod "57b5ce29-2759-4dc4-b3a1-3477aa0c11a4" (UID: "57b5ce29-2759-4dc4-b3a1-3477aa0c11a4"). InnerVolumeSpecName "kube-api-access-5z5z4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:15:04 crc kubenswrapper[4877]: I0128 17:15:04.017082 4877 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 17:15:04 crc kubenswrapper[4877]: I0128 17:15:04.017130 4877 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 17:15:04 crc kubenswrapper[4877]: I0128 17:15:04.017179 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5z5z4\" (UniqueName: \"kubernetes.io/projected/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4-kube-api-access-5z5z4\") on node \"crc\" DevicePath \"\"" Jan 28 17:15:04 crc kubenswrapper[4877]: I0128 17:15:04.331661 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:15:04 crc kubenswrapper[4877]: E0128 17:15:04.332522 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:15:04 crc kubenswrapper[4877]: I0128 17:15:04.364020 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs" Jan 28 17:15:04 crc kubenswrapper[4877]: I0128 17:15:04.364060 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs" event={"ID":"57b5ce29-2759-4dc4-b3a1-3477aa0c11a4","Type":"ContainerDied","Data":"4edc8a3fb28a8ff4c8913c93ecc7c86667f9bddb5877457155ee70a4fcb33080"} Jan 28 17:15:04 crc kubenswrapper[4877]: I0128 17:15:04.364086 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4edc8a3fb28a8ff4c8913c93ecc7c86667f9bddb5877457155ee70a4fcb33080" Jan 28 17:15:04 crc kubenswrapper[4877]: I0128 17:15:04.967420 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564"] Jan 28 17:15:04 crc kubenswrapper[4877]: I0128 17:15:04.977426 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493630-n5564"] Jan 28 17:15:05 crc kubenswrapper[4877]: I0128 17:15:05.347239 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="587b4df1-7315-4a7f-b416-d2e3ff99fd0d" path="/var/lib/kubelet/pods/587b4df1-7315-4a7f-b416-d2e3ff99fd0d/volumes" Jan 28 17:15:08 crc kubenswrapper[4877]: I0128 17:15:08.407909 4877 generic.go:334] "Generic (PLEG): container finished" podID="d2007094-f320-4e43-903f-20a3b7705f1a" containerID="adc35db7d3305977e95ba35915dac2a4b80df27d0ac795fe6ff22f03bbc7363b" exitCode=0 Jan 28 17:15:08 crc kubenswrapper[4877]: I0128 17:15:08.407940 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" event={"ID":"d2007094-f320-4e43-903f-20a3b7705f1a","Type":"ContainerDied","Data":"adc35db7d3305977e95ba35915dac2a4b80df27d0ac795fe6ff22f03bbc7363b"} 
Jan 28 17:15:09 crc kubenswrapper[4877]: I0128 17:15:09.902278 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" Jan 28 17:15:09 crc kubenswrapper[4877]: I0128 17:15:09.982574 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d2007094-f320-4e43-903f-20a3b7705f1a-ssh-key-openstack-edpm-ipam\") pod \"d2007094-f320-4e43-903f-20a3b7705f1a\" (UID: \"d2007094-f320-4e43-903f-20a3b7705f1a\") " Jan 28 17:15:09 crc kubenswrapper[4877]: I0128 17:15:09.983077 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mktj\" (UniqueName: \"kubernetes.io/projected/d2007094-f320-4e43-903f-20a3b7705f1a-kube-api-access-9mktj\") pod \"d2007094-f320-4e43-903f-20a3b7705f1a\" (UID: \"d2007094-f320-4e43-903f-20a3b7705f1a\") " Jan 28 17:15:09 crc kubenswrapper[4877]: I0128 17:15:09.983239 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2007094-f320-4e43-903f-20a3b7705f1a-inventory\") pod \"d2007094-f320-4e43-903f-20a3b7705f1a\" (UID: \"d2007094-f320-4e43-903f-20a3b7705f1a\") " Jan 28 17:15:09 crc kubenswrapper[4877]: I0128 17:15:09.988848 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2007094-f320-4e43-903f-20a3b7705f1a-kube-api-access-9mktj" (OuterVolumeSpecName: "kube-api-access-9mktj") pod "d2007094-f320-4e43-903f-20a3b7705f1a" (UID: "d2007094-f320-4e43-903f-20a3b7705f1a"). InnerVolumeSpecName "kube-api-access-9mktj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.025622 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2007094-f320-4e43-903f-20a3b7705f1a-inventory" (OuterVolumeSpecName: "inventory") pod "d2007094-f320-4e43-903f-20a3b7705f1a" (UID: "d2007094-f320-4e43-903f-20a3b7705f1a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.036823 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2007094-f320-4e43-903f-20a3b7705f1a-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "d2007094-f320-4e43-903f-20a3b7705f1a" (UID: "d2007094-f320-4e43-903f-20a3b7705f1a"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.086690 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d2007094-f320-4e43-903f-20a3b7705f1a-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.086721 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mktj\" (UniqueName: \"kubernetes.io/projected/d2007094-f320-4e43-903f-20a3b7705f1a-kube-api-access-9mktj\") on node \"crc\" DevicePath \"\"" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.086730 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2007094-f320-4e43-903f-20a3b7705f1a-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.430245 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" event={"ID":"d2007094-f320-4e43-903f-20a3b7705f1a","Type":"ContainerDied","Data":"8a1ac1abba785daf6d09e4532175a9c4bdca2be5de60260dc4917f9e42ab03fb"} Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.430285 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a1ac1abba785daf6d09e4532175a9c4bdca2be5de60260dc4917f9e42ab03fb" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.430397 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x6zz" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.512884 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5"] Jan 28 17:15:10 crc kubenswrapper[4877]: E0128 17:15:10.513516 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57b5ce29-2759-4dc4-b3a1-3477aa0c11a4" containerName="collect-profiles" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.513552 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="57b5ce29-2759-4dc4-b3a1-3477aa0c11a4" containerName="collect-profiles" Jan 28 17:15:10 crc kubenswrapper[4877]: E0128 17:15:10.513591 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2007094-f320-4e43-903f-20a3b7705f1a" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.513606 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2007094-f320-4e43-903f-20a3b7705f1a" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.513859 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="57b5ce29-2759-4dc4-b3a1-3477aa0c11a4" containerName="collect-profiles" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.513880 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2007094-f320-4e43-903f-20a3b7705f1a" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.514814 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.523236 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5"] Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.523696 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.524063 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.524069 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.525013 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.596543 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-whbl5\" (UID: \"64c03fb4-1aee-45a7-aefa-14b31db9ef6f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.597064 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qfxw\" (UniqueName: \"kubernetes.io/projected/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-kube-api-access-4qfxw\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-whbl5\" (UID: \"64c03fb4-1aee-45a7-aefa-14b31db9ef6f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.597242 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-whbl5\" (UID: \"64c03fb4-1aee-45a7-aefa-14b31db9ef6f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.700009 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qfxw\" (UniqueName: \"kubernetes.io/projected/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-kube-api-access-4qfxw\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-whbl5\" (UID: \"64c03fb4-1aee-45a7-aefa-14b31db9ef6f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.700425 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-whbl5\" (UID: \"64c03fb4-1aee-45a7-aefa-14b31db9ef6f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.700541 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-ssh-key-openstack-edpm-ipam\") pod 
\"install-os-edpm-deployment-openstack-edpm-ipam-whbl5\" (UID: \"64c03fb4-1aee-45a7-aefa-14b31db9ef6f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.705200 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-whbl5\" (UID: \"64c03fb4-1aee-45a7-aefa-14b31db9ef6f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.705954 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-whbl5\" (UID: \"64c03fb4-1aee-45a7-aefa-14b31db9ef6f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.721937 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qfxw\" (UniqueName: \"kubernetes.io/projected/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-kube-api-access-4qfxw\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-whbl5\" (UID: \"64c03fb4-1aee-45a7-aefa-14b31db9ef6f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" Jan 28 17:15:10 crc kubenswrapper[4877]: I0128 17:15:10.835180 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" Jan 28 17:15:11 crc kubenswrapper[4877]: I0128 17:15:11.461940 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5"] Jan 28 17:15:12 crc kubenswrapper[4877]: I0128 17:15:12.450954 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" event={"ID":"64c03fb4-1aee-45a7-aefa-14b31db9ef6f","Type":"ContainerStarted","Data":"f455f003e0d0312d994f4167089f15b8371a497e67263a533ca2d8a1360fb572"} Jan 28 17:15:12 crc kubenswrapper[4877]: I0128 17:15:12.451325 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" event={"ID":"64c03fb4-1aee-45a7-aefa-14b31db9ef6f","Type":"ContainerStarted","Data":"bd52b05e0282fb5f709456e794ccdd04c99eee646f8a26cb32b16f94c9619f8d"} Jan 28 17:15:12 crc kubenswrapper[4877]: I0128 17:15:12.485984 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" podStartSLOduration=2.000592718 podStartE2EDuration="2.48596276s" podCreationTimestamp="2026-01-28 17:15:10 +0000 UTC" firstStartedPulling="2026-01-28 17:15:11.473156503 +0000 UTC m=+2415.031483391" lastFinishedPulling="2026-01-28 17:15:11.958526545 +0000 UTC m=+2415.516853433" observedRunningTime="2026-01-28 17:15:12.473268019 +0000 UTC m=+2416.031594907" watchObservedRunningTime="2026-01-28 17:15:12.48596276 +0000 UTC m=+2416.044289658" Jan 28 17:15:18 crc kubenswrapper[4877]: I0128 17:15:18.989969 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-sd974"] Jan 28 17:15:18 crc kubenswrapper[4877]: I0128 17:15:18.993413 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:19 crc kubenswrapper[4877]: I0128 17:15:19.010242 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sd974"] Jan 28 17:15:19 crc kubenswrapper[4877]: I0128 17:15:19.133202 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68e23a3a-fa21-4125-b592-2dc585d5d9e0-utilities\") pod \"community-operators-sd974\" (UID: \"68e23a3a-fa21-4125-b592-2dc585d5d9e0\") " pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:19 crc kubenswrapper[4877]: I0128 17:15:19.133782 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68e23a3a-fa21-4125-b592-2dc585d5d9e0-catalog-content\") pod \"community-operators-sd974\" (UID: \"68e23a3a-fa21-4125-b592-2dc585d5d9e0\") " pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:19 crc kubenswrapper[4877]: I0128 17:15:19.133813 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpkf2\" (UniqueName: \"kubernetes.io/projected/68e23a3a-fa21-4125-b592-2dc585d5d9e0-kube-api-access-qpkf2\") pod \"community-operators-sd974\" (UID: \"68e23a3a-fa21-4125-b592-2dc585d5d9e0\") " pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:19 crc kubenswrapper[4877]: I0128 17:15:19.236613 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68e23a3a-fa21-4125-b592-2dc585d5d9e0-utilities\") pod \"community-operators-sd974\" (UID: \"68e23a3a-fa21-4125-b592-2dc585d5d9e0\") " pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:19 crc kubenswrapper[4877]: I0128 17:15:19.236688 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68e23a3a-fa21-4125-b592-2dc585d5d9e0-catalog-content\") pod \"community-operators-sd974\" (UID: \"68e23a3a-fa21-4125-b592-2dc585d5d9e0\") " pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:19 crc kubenswrapper[4877]: I0128 17:15:19.236714 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpkf2\" (UniqueName: \"kubernetes.io/projected/68e23a3a-fa21-4125-b592-2dc585d5d9e0-kube-api-access-qpkf2\") pod \"community-operators-sd974\" (UID: \"68e23a3a-fa21-4125-b592-2dc585d5d9e0\") " pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:19 crc kubenswrapper[4877]: I0128 17:15:19.237342 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68e23a3a-fa21-4125-b592-2dc585d5d9e0-catalog-content\") pod \"community-operators-sd974\" (UID: \"68e23a3a-fa21-4125-b592-2dc585d5d9e0\") " pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:19 crc kubenswrapper[4877]: I0128 17:15:19.237357 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68e23a3a-fa21-4125-b592-2dc585d5d9e0-utilities\") pod \"community-operators-sd974\" (UID: \"68e23a3a-fa21-4125-b592-2dc585d5d9e0\") " pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:19 crc kubenswrapper[4877]: I0128 17:15:19.260285 4877 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qpkf2\" (UniqueName: \"kubernetes.io/projected/68e23a3a-fa21-4125-b592-2dc585d5d9e0-kube-api-access-qpkf2\") pod \"community-operators-sd974\" (UID: \"68e23a3a-fa21-4125-b592-2dc585d5d9e0\") " pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:19 crc kubenswrapper[4877]: I0128 17:15:19.321734 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:19 crc kubenswrapper[4877]: I0128 17:15:19.330508 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:15:19 crc kubenswrapper[4877]: E0128 17:15:19.330963 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:15:19 crc kubenswrapper[4877]: I0128 17:15:19.873750 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sd974"] Jan 28 17:15:20 crc kubenswrapper[4877]: I0128 17:15:20.563416 4877 generic.go:334] "Generic (PLEG): container finished" podID="68e23a3a-fa21-4125-b592-2dc585d5d9e0" containerID="458b9237ca1731530b38acc1f2812d48db9ecbfa358cd2b864f5d2116b5bcc33" exitCode=0 Jan 28 17:15:20 crc kubenswrapper[4877]: I0128 17:15:20.563513 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sd974" event={"ID":"68e23a3a-fa21-4125-b592-2dc585d5d9e0","Type":"ContainerDied","Data":"458b9237ca1731530b38acc1f2812d48db9ecbfa358cd2b864f5d2116b5bcc33"} Jan 28 17:15:20 crc kubenswrapper[4877]: I0128 17:15:20.563550 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sd974" event={"ID":"68e23a3a-fa21-4125-b592-2dc585d5d9e0","Type":"ContainerStarted","Data":"a8d7b26b00bd7fd0ec50c55686e0ac30ea9957c09559c368b3e498f9a8f3cfc7"} Jan 28 17:15:21 crc kubenswrapper[4877]: I0128 17:15:21.573560 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sd974" event={"ID":"68e23a3a-fa21-4125-b592-2dc585d5d9e0","Type":"ContainerStarted","Data":"b1c360972f5e821c290f8a555e65ccf0439178c6696a709d7c0fced3606d8e60"} Jan 28 17:15:22 crc kubenswrapper[4877]: I0128 17:15:22.186892 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gvnrm"] Jan 28 17:15:22 crc kubenswrapper[4877]: I0128 17:15:22.189330 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:22 crc kubenswrapper[4877]: I0128 17:15:22.205802 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gvnrm"] Jan 28 17:15:22 crc kubenswrapper[4877]: I0128 17:15:22.310018 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6998378-c9db-4eea-919e-efb4166feae0-utilities\") pod \"redhat-operators-gvnrm\" (UID: \"c6998378-c9db-4eea-919e-efb4166feae0\") " pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:22 crc kubenswrapper[4877]: I0128 17:15:22.310075 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6998378-c9db-4eea-919e-efb4166feae0-catalog-content\") pod \"redhat-operators-gvnrm\" (UID: \"c6998378-c9db-4eea-919e-efb4166feae0\") " pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:22 crc kubenswrapper[4877]: I0128 17:15:22.310144 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s77b5\" (UniqueName: \"kubernetes.io/projected/c6998378-c9db-4eea-919e-efb4166feae0-kube-api-access-s77b5\") pod \"redhat-operators-gvnrm\" (UID: \"c6998378-c9db-4eea-919e-efb4166feae0\") " pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:22 crc kubenswrapper[4877]: I0128 17:15:22.412366 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6998378-c9db-4eea-919e-efb4166feae0-utilities\") pod \"redhat-operators-gvnrm\" (UID: \"c6998378-c9db-4eea-919e-efb4166feae0\") " pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:22 crc kubenswrapper[4877]: I0128 17:15:22.412447 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6998378-c9db-4eea-919e-efb4166feae0-catalog-content\") pod \"redhat-operators-gvnrm\" (UID: \"c6998378-c9db-4eea-919e-efb4166feae0\") " pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:22 crc kubenswrapper[4877]: I0128 17:15:22.412526 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s77b5\" (UniqueName: \"kubernetes.io/projected/c6998378-c9db-4eea-919e-efb4166feae0-kube-api-access-s77b5\") pod \"redhat-operators-gvnrm\" (UID: \"c6998378-c9db-4eea-919e-efb4166feae0\") " pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:22 crc kubenswrapper[4877]: I0128 17:15:22.412874 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6998378-c9db-4eea-919e-efb4166feae0-utilities\") pod \"redhat-operators-gvnrm\" (UID: \"c6998378-c9db-4eea-919e-efb4166feae0\") " pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:22 crc kubenswrapper[4877]: I0128 17:15:22.413099 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6998378-c9db-4eea-919e-efb4166feae0-catalog-content\") pod \"redhat-operators-gvnrm\" (UID: \"c6998378-c9db-4eea-919e-efb4166feae0\") " pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:22 crc kubenswrapper[4877]: I0128 17:15:22.434398 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-s77b5\" (UniqueName: \"kubernetes.io/projected/c6998378-c9db-4eea-919e-efb4166feae0-kube-api-access-s77b5\") pod \"redhat-operators-gvnrm\" (UID: \"c6998378-c9db-4eea-919e-efb4166feae0\") " pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:22 crc kubenswrapper[4877]: I0128 17:15:22.566515 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:23 crc kubenswrapper[4877]: I0128 17:15:23.185208 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gvnrm"] Jan 28 17:15:23 crc kubenswrapper[4877]: I0128 17:15:23.596604 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gvnrm" event={"ID":"c6998378-c9db-4eea-919e-efb4166feae0","Type":"ContainerStarted","Data":"31c1206dab646b59952826e646b8b5eb8c78e657f37452ad3b0e3039036b75f0"} Jan 28 17:15:24 crc kubenswrapper[4877]: I0128 17:15:24.609663 4877 generic.go:334] "Generic (PLEG): container finished" podID="c6998378-c9db-4eea-919e-efb4166feae0" containerID="35355e93564c5cad997c5f672e569c935679177b371086fc481197267331b26d" exitCode=0 Jan 28 17:15:24 crc kubenswrapper[4877]: I0128 17:15:24.609735 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gvnrm" event={"ID":"c6998378-c9db-4eea-919e-efb4166feae0","Type":"ContainerDied","Data":"35355e93564c5cad997c5f672e569c935679177b371086fc481197267331b26d"} Jan 28 17:15:24 crc kubenswrapper[4877]: I0128 17:15:24.612229 4877 generic.go:334] "Generic (PLEG): container finished" podID="68e23a3a-fa21-4125-b592-2dc585d5d9e0" containerID="b1c360972f5e821c290f8a555e65ccf0439178c6696a709d7c0fced3606d8e60" exitCode=0 Jan 28 17:15:24 crc kubenswrapper[4877]: I0128 17:15:24.612276 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sd974" event={"ID":"68e23a3a-fa21-4125-b592-2dc585d5d9e0","Type":"ContainerDied","Data":"b1c360972f5e821c290f8a555e65ccf0439178c6696a709d7c0fced3606d8e60"} Jan 28 17:15:27 crc kubenswrapper[4877]: I0128 17:15:27.649392 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sd974" event={"ID":"68e23a3a-fa21-4125-b592-2dc585d5d9e0","Type":"ContainerStarted","Data":"66d7379584f836caf068f8137e8ec3b064f760cb163a4fb170223018590a753f"} Jan 28 17:15:27 crc kubenswrapper[4877]: I0128 17:15:27.661351 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gvnrm" event={"ID":"c6998378-c9db-4eea-919e-efb4166feae0","Type":"ContainerStarted","Data":"90d1ffd09236cd1d92a5532b7be071f1988ea608efa320b10eb27bf82cd83173"} Jan 28 17:15:27 crc kubenswrapper[4877]: I0128 17:15:27.695397 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-sd974" podStartSLOduration=3.465336066 podStartE2EDuration="9.695375451s" podCreationTimestamp="2026-01-28 17:15:18 +0000 UTC" firstStartedPulling="2026-01-28 17:15:20.566624689 +0000 UTC m=+2424.124951577" lastFinishedPulling="2026-01-28 17:15:26.796664074 +0000 UTC m=+2430.354990962" observedRunningTime="2026-01-28 17:15:27.685589267 +0000 UTC m=+2431.243916175" watchObservedRunningTime="2026-01-28 17:15:27.695375451 +0000 UTC m=+2431.253702339" Jan 28 17:15:29 crc kubenswrapper[4877]: I0128 17:15:29.321858 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:29 crc kubenswrapper[4877]: I0128 17:15:29.322106 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:30 crc kubenswrapper[4877]: I0128 17:15:30.032204 4877 scope.go:117] "RemoveContainer" containerID="ed97ff0eaabbe956455402550e7465babb30e5600133984b95a9c60955aa8927" Jan 28 17:15:30 crc kubenswrapper[4877]: I0128 17:15:30.061165 4877 scope.go:117] "RemoveContainer" containerID="01a871d9b7cb7c30eb2fc7c233b32c042067939b85b4157c38ba6228f3d56c86" Jan 28 17:15:30 crc kubenswrapper[4877]: I0128 17:15:30.330321 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:15:30 crc kubenswrapper[4877]: E0128 17:15:30.330912 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:15:30 crc kubenswrapper[4877]: I0128 17:15:30.380015 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-sd974" podUID="68e23a3a-fa21-4125-b592-2dc585d5d9e0" containerName="registry-server" probeResult="failure" output=< Jan 28 17:15:30 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:15:30 crc kubenswrapper[4877]: > Jan 28 17:15:37 crc kubenswrapper[4877]: I0128 17:15:37.970219 4877 patch_prober.go:28] interesting pod/metrics-server-fbbd74554-qkt8l container/metrics-server namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.75:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 17:15:37 crc kubenswrapper[4877]: I0128 17:15:37.970720 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" podUID="7829fe04-318e-4cda-adb5-4109e6d6f751" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.75:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 17:15:37 crc kubenswrapper[4877]: I0128 17:15:37.970315 4877 patch_prober.go:28] interesting pod/metrics-server-fbbd74554-qkt8l container/metrics-server namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.75:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 17:15:37 crc kubenswrapper[4877]: I0128 17:15:37.971098 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" podUID="7829fe04-318e-4cda-adb5-4109e6d6f751" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.75:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 17:15:39 crc kubenswrapper[4877]: I0128 17:15:39.388380 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:39 crc kubenswrapper[4877]: I0128 17:15:39.440650 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:39 crc kubenswrapper[4877]: I0128 17:15:39.631929 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sd974"] Jan 28 17:15:40 crc kubenswrapper[4877]: I0128 17:15:40.804740 4877 generic.go:334] "Generic (PLEG): container finished" podID="c6998378-c9db-4eea-919e-efb4166feae0" containerID="90d1ffd09236cd1d92a5532b7be071f1988ea608efa320b10eb27bf82cd83173" exitCode=0 Jan 28 17:15:40 crc kubenswrapper[4877]: I0128 17:15:40.804863 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gvnrm" event={"ID":"c6998378-c9db-4eea-919e-efb4166feae0","Type":"ContainerDied","Data":"90d1ffd09236cd1d92a5532b7be071f1988ea608efa320b10eb27bf82cd83173"} Jan 28 17:15:40 crc kubenswrapper[4877]: I0128 17:15:40.805337 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-sd974" podUID="68e23a3a-fa21-4125-b592-2dc585d5d9e0" containerName="registry-server" containerID="cri-o://66d7379584f836caf068f8137e8ec3b064f760cb163a4fb170223018590a753f" gracePeriod=2 Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.359801 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.493528 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68e23a3a-fa21-4125-b592-2dc585d5d9e0-catalog-content\") pod \"68e23a3a-fa21-4125-b592-2dc585d5d9e0\" (UID: \"68e23a3a-fa21-4125-b592-2dc585d5d9e0\") " Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.494158 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qpkf2\" (UniqueName: \"kubernetes.io/projected/68e23a3a-fa21-4125-b592-2dc585d5d9e0-kube-api-access-qpkf2\") pod \"68e23a3a-fa21-4125-b592-2dc585d5d9e0\" (UID: \"68e23a3a-fa21-4125-b592-2dc585d5d9e0\") " Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.494258 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68e23a3a-fa21-4125-b592-2dc585d5d9e0-utilities\") pod \"68e23a3a-fa21-4125-b592-2dc585d5d9e0\" (UID: \"68e23a3a-fa21-4125-b592-2dc585d5d9e0\") " Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.494836 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68e23a3a-fa21-4125-b592-2dc585d5d9e0-utilities" (OuterVolumeSpecName: "utilities") pod "68e23a3a-fa21-4125-b592-2dc585d5d9e0" (UID: "68e23a3a-fa21-4125-b592-2dc585d5d9e0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.495243 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68e23a3a-fa21-4125-b592-2dc585d5d9e0-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.510606 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68e23a3a-fa21-4125-b592-2dc585d5d9e0-kube-api-access-qpkf2" (OuterVolumeSpecName: "kube-api-access-qpkf2") pod "68e23a3a-fa21-4125-b592-2dc585d5d9e0" (UID: "68e23a3a-fa21-4125-b592-2dc585d5d9e0"). InnerVolumeSpecName "kube-api-access-qpkf2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.549624 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68e23a3a-fa21-4125-b592-2dc585d5d9e0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "68e23a3a-fa21-4125-b592-2dc585d5d9e0" (UID: "68e23a3a-fa21-4125-b592-2dc585d5d9e0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.598326 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qpkf2\" (UniqueName: \"kubernetes.io/projected/68e23a3a-fa21-4125-b592-2dc585d5d9e0-kube-api-access-qpkf2\") on node \"crc\" DevicePath \"\"" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.598371 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68e23a3a-fa21-4125-b592-2dc585d5d9e0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.855034 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gvnrm" event={"ID":"c6998378-c9db-4eea-919e-efb4166feae0","Type":"ContainerStarted","Data":"050c3654deb88c30ae66ca41f5b34b49abd7f071633134d5c8ccfa33afc5246e"} Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.862782 4877 generic.go:334] "Generic (PLEG): container finished" podID="68e23a3a-fa21-4125-b592-2dc585d5d9e0" containerID="66d7379584f836caf068f8137e8ec3b064f760cb163a4fb170223018590a753f" exitCode=0 Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.862847 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sd974" event={"ID":"68e23a3a-fa21-4125-b592-2dc585d5d9e0","Type":"ContainerDied","Data":"66d7379584f836caf068f8137e8ec3b064f760cb163a4fb170223018590a753f"} Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.862882 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sd974" event={"ID":"68e23a3a-fa21-4125-b592-2dc585d5d9e0","Type":"ContainerDied","Data":"a8d7b26b00bd7fd0ec50c55686e0ac30ea9957c09559c368b3e498f9a8f3cfc7"} Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.862905 4877 scope.go:117] "RemoveContainer" containerID="66d7379584f836caf068f8137e8ec3b064f760cb163a4fb170223018590a753f" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.863068 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sd974" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.884898 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gvnrm" podStartSLOduration=3.163632225 podStartE2EDuration="19.884875306s" podCreationTimestamp="2026-01-28 17:15:22 +0000 UTC" firstStartedPulling="2026-01-28 17:15:24.612429971 +0000 UTC m=+2428.170756859" lastFinishedPulling="2026-01-28 17:15:41.333673052 +0000 UTC m=+2444.891999940" observedRunningTime="2026-01-28 17:15:41.874734912 +0000 UTC m=+2445.433061800" watchObservedRunningTime="2026-01-28 17:15:41.884875306 +0000 UTC m=+2445.443202194" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.901194 4877 scope.go:117] "RemoveContainer" containerID="b1c360972f5e821c290f8a555e65ccf0439178c6696a709d7c0fced3606d8e60" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.916661 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sd974"] Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.930077 4877 scope.go:117] "RemoveContainer" containerID="458b9237ca1731530b38acc1f2812d48db9ecbfa358cd2b864f5d2116b5bcc33" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.932065 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-sd974"] Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.982042 4877 scope.go:117] "RemoveContainer" containerID="66d7379584f836caf068f8137e8ec3b064f760cb163a4fb170223018590a753f" Jan 28 17:15:41 crc kubenswrapper[4877]: E0128 17:15:41.982528 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66d7379584f836caf068f8137e8ec3b064f760cb163a4fb170223018590a753f\": container with ID starting with 66d7379584f836caf068f8137e8ec3b064f760cb163a4fb170223018590a753f not found: ID does not exist" containerID="66d7379584f836caf068f8137e8ec3b064f760cb163a4fb170223018590a753f" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.982556 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66d7379584f836caf068f8137e8ec3b064f760cb163a4fb170223018590a753f"} err="failed to get container status \"66d7379584f836caf068f8137e8ec3b064f760cb163a4fb170223018590a753f\": rpc error: code = NotFound desc = could not find container \"66d7379584f836caf068f8137e8ec3b064f760cb163a4fb170223018590a753f\": container with ID starting with 66d7379584f836caf068f8137e8ec3b064f760cb163a4fb170223018590a753f not found: ID does not exist" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.982577 4877 scope.go:117] "RemoveContainer" containerID="b1c360972f5e821c290f8a555e65ccf0439178c6696a709d7c0fced3606d8e60" Jan 28 17:15:41 crc kubenswrapper[4877]: E0128 17:15:41.982922 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1c360972f5e821c290f8a555e65ccf0439178c6696a709d7c0fced3606d8e60\": container with ID starting with b1c360972f5e821c290f8a555e65ccf0439178c6696a709d7c0fced3606d8e60 not found: ID does not exist" containerID="b1c360972f5e821c290f8a555e65ccf0439178c6696a709d7c0fced3606d8e60" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.982943 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1c360972f5e821c290f8a555e65ccf0439178c6696a709d7c0fced3606d8e60"} err="failed to get 
container status \"b1c360972f5e821c290f8a555e65ccf0439178c6696a709d7c0fced3606d8e60\": rpc error: code = NotFound desc = could not find container \"b1c360972f5e821c290f8a555e65ccf0439178c6696a709d7c0fced3606d8e60\": container with ID starting with b1c360972f5e821c290f8a555e65ccf0439178c6696a709d7c0fced3606d8e60 not found: ID does not exist" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.982956 4877 scope.go:117] "RemoveContainer" containerID="458b9237ca1731530b38acc1f2812d48db9ecbfa358cd2b864f5d2116b5bcc33" Jan 28 17:15:41 crc kubenswrapper[4877]: E0128 17:15:41.983281 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"458b9237ca1731530b38acc1f2812d48db9ecbfa358cd2b864f5d2116b5bcc33\": container with ID starting with 458b9237ca1731530b38acc1f2812d48db9ecbfa358cd2b864f5d2116b5bcc33 not found: ID does not exist" containerID="458b9237ca1731530b38acc1f2812d48db9ecbfa358cd2b864f5d2116b5bcc33" Jan 28 17:15:41 crc kubenswrapper[4877]: I0128 17:15:41.983303 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"458b9237ca1731530b38acc1f2812d48db9ecbfa358cd2b864f5d2116b5bcc33"} err="failed to get container status \"458b9237ca1731530b38acc1f2812d48db9ecbfa358cd2b864f5d2116b5bcc33\": rpc error: code = NotFound desc = could not find container \"458b9237ca1731530b38acc1f2812d48db9ecbfa358cd2b864f5d2116b5bcc33\": container with ID starting with 458b9237ca1731530b38acc1f2812d48db9ecbfa358cd2b864f5d2116b5bcc33 not found: ID does not exist" Jan 28 17:15:42 crc kubenswrapper[4877]: I0128 17:15:42.566937 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:42 crc kubenswrapper[4877]: I0128 17:15:42.567230 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:43 crc kubenswrapper[4877]: I0128 17:15:43.343983 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68e23a3a-fa21-4125-b592-2dc585d5d9e0" path="/var/lib/kubelet/pods/68e23a3a-fa21-4125-b592-2dc585d5d9e0/volumes" Jan 28 17:15:43 crc kubenswrapper[4877]: I0128 17:15:43.620810 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gvnrm" podUID="c6998378-c9db-4eea-919e-efb4166feae0" containerName="registry-server" probeResult="failure" output=< Jan 28 17:15:43 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:15:43 crc kubenswrapper[4877]: > Jan 28 17:15:44 crc kubenswrapper[4877]: I0128 17:15:44.331146 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:15:44 crc kubenswrapper[4877]: E0128 17:15:44.331511 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:15:51 crc kubenswrapper[4877]: I0128 17:15:51.970827 4877 generic.go:334] "Generic (PLEG): container finished" podID="64c03fb4-1aee-45a7-aefa-14b31db9ef6f" containerID="f455f003e0d0312d994f4167089f15b8371a497e67263a533ca2d8a1360fb572" exitCode=0 Jan 28 
17:15:51 crc kubenswrapper[4877]: I0128 17:15:51.971196 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" event={"ID":"64c03fb4-1aee-45a7-aefa-14b31db9ef6f","Type":"ContainerDied","Data":"f455f003e0d0312d994f4167089f15b8371a497e67263a533ca2d8a1360fb572"} Jan 28 17:15:52 crc kubenswrapper[4877]: I0128 17:15:52.620045 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:52 crc kubenswrapper[4877]: I0128 17:15:52.671898 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:53 crc kubenswrapper[4877]: I0128 17:15:53.422066 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gvnrm"] Jan 28 17:15:53 crc kubenswrapper[4877]: I0128 17:15:53.824668 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" Jan 28 17:15:53 crc kubenswrapper[4877]: I0128 17:15:53.970363 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-ssh-key-openstack-edpm-ipam\") pod \"64c03fb4-1aee-45a7-aefa-14b31db9ef6f\" (UID: \"64c03fb4-1aee-45a7-aefa-14b31db9ef6f\") " Jan 28 17:15:53 crc kubenswrapper[4877]: I0128 17:15:53.970543 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qfxw\" (UniqueName: \"kubernetes.io/projected/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-kube-api-access-4qfxw\") pod \"64c03fb4-1aee-45a7-aefa-14b31db9ef6f\" (UID: \"64c03fb4-1aee-45a7-aefa-14b31db9ef6f\") " Jan 28 17:15:53 crc kubenswrapper[4877]: I0128 17:15:53.971802 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-inventory\") pod \"64c03fb4-1aee-45a7-aefa-14b31db9ef6f\" (UID: \"64c03fb4-1aee-45a7-aefa-14b31db9ef6f\") " Jan 28 17:15:53 crc kubenswrapper[4877]: I0128 17:15:53.976352 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-kube-api-access-4qfxw" (OuterVolumeSpecName: "kube-api-access-4qfxw") pod "64c03fb4-1aee-45a7-aefa-14b31db9ef6f" (UID: "64c03fb4-1aee-45a7-aefa-14b31db9ef6f"). InnerVolumeSpecName "kube-api-access-4qfxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:15:53 crc kubenswrapper[4877]: I0128 17:15:53.998638 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" event={"ID":"64c03fb4-1aee-45a7-aefa-14b31db9ef6f","Type":"ContainerDied","Data":"bd52b05e0282fb5f709456e794ccdd04c99eee646f8a26cb32b16f94c9619f8d"} Jan 28 17:15:53 crc kubenswrapper[4877]: I0128 17:15:53.998693 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-whbl5" Jan 28 17:15:53 crc kubenswrapper[4877]: I0128 17:15:53.998708 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd52b05e0282fb5f709456e794ccdd04c99eee646f8a26cb32b16f94c9619f8d" Jan 28 17:15:53 crc kubenswrapper[4877]: I0128 17:15:53.998993 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gvnrm" podUID="c6998378-c9db-4eea-919e-efb4166feae0" containerName="registry-server" containerID="cri-o://050c3654deb88c30ae66ca41f5b34b49abd7f071633134d5c8ccfa33afc5246e" gracePeriod=2 Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.009741 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "64c03fb4-1aee-45a7-aefa-14b31db9ef6f" (UID: "64c03fb4-1aee-45a7-aefa-14b31db9ef6f"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.025046 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-inventory" (OuterVolumeSpecName: "inventory") pod "64c03fb4-1aee-45a7-aefa-14b31db9ef6f" (UID: "64c03fb4-1aee-45a7-aefa-14b31db9ef6f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.076682 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.076755 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.076774 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qfxw\" (UniqueName: \"kubernetes.io/projected/64c03fb4-1aee-45a7-aefa-14b31db9ef6f-kube-api-access-4qfxw\") on node \"crc\" DevicePath \"\"" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.123963 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb"] Jan 28 17:15:54 crc kubenswrapper[4877]: E0128 17:15:54.125438 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68e23a3a-fa21-4125-b592-2dc585d5d9e0" containerName="registry-server" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.125605 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="68e23a3a-fa21-4125-b592-2dc585d5d9e0" containerName="registry-server" Jan 28 17:15:54 crc kubenswrapper[4877]: E0128 17:15:54.125717 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64c03fb4-1aee-45a7-aefa-14b31db9ef6f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.125825 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="64c03fb4-1aee-45a7-aefa-14b31db9ef6f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 28 17:15:54 crc kubenswrapper[4877]: E0128 17:15:54.125947 4877 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="68e23a3a-fa21-4125-b592-2dc585d5d9e0" containerName="extract-utilities" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.126058 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="68e23a3a-fa21-4125-b592-2dc585d5d9e0" containerName="extract-utilities" Jan 28 17:15:54 crc kubenswrapper[4877]: E0128 17:15:54.126144 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68e23a3a-fa21-4125-b592-2dc585d5d9e0" containerName="extract-content" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.126232 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="68e23a3a-fa21-4125-b592-2dc585d5d9e0" containerName="extract-content" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.126924 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="68e23a3a-fa21-4125-b592-2dc585d5d9e0" containerName="registry-server" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.127088 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="64c03fb4-1aee-45a7-aefa-14b31db9ef6f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.128818 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.139027 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb"] Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.182233 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz6h4\" (UniqueName: \"kubernetes.io/projected/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-kube-api-access-lz6h4\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb\" (UID: \"ba650e8a-baee-48e9-8a3b-a45d1418b9fe\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.182310 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb\" (UID: \"ba650e8a-baee-48e9-8a3b-a45d1418b9fe\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.182759 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb\" (UID: \"ba650e8a-baee-48e9-8a3b-a45d1418b9fe\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.285833 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb\" (UID: \"ba650e8a-baee-48e9-8a3b-a45d1418b9fe\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.286173 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz6h4\" (UniqueName: 
\"kubernetes.io/projected/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-kube-api-access-lz6h4\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb\" (UID: \"ba650e8a-baee-48e9-8a3b-a45d1418b9fe\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.286237 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb\" (UID: \"ba650e8a-baee-48e9-8a3b-a45d1418b9fe\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.302503 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb\" (UID: \"ba650e8a-baee-48e9-8a3b-a45d1418b9fe\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.302573 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb\" (UID: \"ba650e8a-baee-48e9-8a3b-a45d1418b9fe\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.325213 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz6h4\" (UniqueName: \"kubernetes.io/projected/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-kube-api-access-lz6h4\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb\" (UID: \"ba650e8a-baee-48e9-8a3b-a45d1418b9fe\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" Jan 28 17:15:54 crc kubenswrapper[4877]: I0128 17:15:54.453744 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" Jan 28 17:15:55 crc kubenswrapper[4877]: I0128 17:15:55.017260 4877 generic.go:334] "Generic (PLEG): container finished" podID="c6998378-c9db-4eea-919e-efb4166feae0" containerID="050c3654deb88c30ae66ca41f5b34b49abd7f071633134d5c8ccfa33afc5246e" exitCode=0 Jan 28 17:15:55 crc kubenswrapper[4877]: I0128 17:15:55.017321 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gvnrm" event={"ID":"c6998378-c9db-4eea-919e-efb4166feae0","Type":"ContainerDied","Data":"050c3654deb88c30ae66ca41f5b34b49abd7f071633134d5c8ccfa33afc5246e"} Jan 28 17:15:55 crc kubenswrapper[4877]: I0128 17:15:55.156712 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb"] Jan 28 17:15:55 crc kubenswrapper[4877]: I0128 17:15:55.258454 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:55 crc kubenswrapper[4877]: I0128 17:15:55.321225 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6998378-c9db-4eea-919e-efb4166feae0-utilities\") pod \"c6998378-c9db-4eea-919e-efb4166feae0\" (UID: \"c6998378-c9db-4eea-919e-efb4166feae0\") " Jan 28 17:15:55 crc kubenswrapper[4877]: I0128 17:15:55.321389 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6998378-c9db-4eea-919e-efb4166feae0-catalog-content\") pod \"c6998378-c9db-4eea-919e-efb4166feae0\" (UID: \"c6998378-c9db-4eea-919e-efb4166feae0\") " Jan 28 17:15:55 crc kubenswrapper[4877]: I0128 17:15:55.322444 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s77b5\" (UniqueName: \"kubernetes.io/projected/c6998378-c9db-4eea-919e-efb4166feae0-kube-api-access-s77b5\") pod \"c6998378-c9db-4eea-919e-efb4166feae0\" (UID: \"c6998378-c9db-4eea-919e-efb4166feae0\") " Jan 28 17:15:55 crc kubenswrapper[4877]: I0128 17:15:55.322863 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6998378-c9db-4eea-919e-efb4166feae0-utilities" (OuterVolumeSpecName: "utilities") pod "c6998378-c9db-4eea-919e-efb4166feae0" (UID: "c6998378-c9db-4eea-919e-efb4166feae0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:15:55 crc kubenswrapper[4877]: I0128 17:15:55.324430 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6998378-c9db-4eea-919e-efb4166feae0-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:15:55 crc kubenswrapper[4877]: I0128 17:15:55.334882 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6998378-c9db-4eea-919e-efb4166feae0-kube-api-access-s77b5" (OuterVolumeSpecName: "kube-api-access-s77b5") pod "c6998378-c9db-4eea-919e-efb4166feae0" (UID: "c6998378-c9db-4eea-919e-efb4166feae0"). InnerVolumeSpecName "kube-api-access-s77b5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:15:55 crc kubenswrapper[4877]: I0128 17:15:55.427316 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s77b5\" (UniqueName: \"kubernetes.io/projected/c6998378-c9db-4eea-919e-efb4166feae0-kube-api-access-s77b5\") on node \"crc\" DevicePath \"\"" Jan 28 17:15:55 crc kubenswrapper[4877]: I0128 17:15:55.467301 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6998378-c9db-4eea-919e-efb4166feae0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c6998378-c9db-4eea-919e-efb4166feae0" (UID: "c6998378-c9db-4eea-919e-efb4166feae0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:15:55 crc kubenswrapper[4877]: I0128 17:15:55.529777 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6998378-c9db-4eea-919e-efb4166feae0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:15:56 crc kubenswrapper[4877]: I0128 17:15:56.030695 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" event={"ID":"ba650e8a-baee-48e9-8a3b-a45d1418b9fe","Type":"ContainerStarted","Data":"fb4108908ab1182319837fc312de9c79aa75c9babdd4531fdd2c7cb72c923712"} Jan 28 17:15:56 crc kubenswrapper[4877]: I0128 17:15:56.034061 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gvnrm" event={"ID":"c6998378-c9db-4eea-919e-efb4166feae0","Type":"ContainerDied","Data":"31c1206dab646b59952826e646b8b5eb8c78e657f37452ad3b0e3039036b75f0"} Jan 28 17:15:56 crc kubenswrapper[4877]: I0128 17:15:56.034112 4877 scope.go:117] "RemoveContainer" containerID="050c3654deb88c30ae66ca41f5b34b49abd7f071633134d5c8ccfa33afc5246e" Jan 28 17:15:56 crc kubenswrapper[4877]: I0128 17:15:56.034339 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gvnrm" Jan 28 17:15:56 crc kubenswrapper[4877]: I0128 17:15:56.162878 4877 scope.go:117] "RemoveContainer" containerID="90d1ffd09236cd1d92a5532b7be071f1988ea608efa320b10eb27bf82cd83173" Jan 28 17:15:56 crc kubenswrapper[4877]: I0128 17:15:56.209768 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gvnrm"] Jan 28 17:15:56 crc kubenswrapper[4877]: I0128 17:15:56.210556 4877 scope.go:117] "RemoveContainer" containerID="35355e93564c5cad997c5f672e569c935679177b371086fc481197267331b26d" Jan 28 17:15:56 crc kubenswrapper[4877]: I0128 17:15:56.224559 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gvnrm"] Jan 28 17:15:56 crc kubenswrapper[4877]: I0128 17:15:56.331522 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:15:56 crc kubenswrapper[4877]: E0128 17:15:56.332160 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:15:57 crc kubenswrapper[4877]: I0128 17:15:57.051838 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" event={"ID":"ba650e8a-baee-48e9-8a3b-a45d1418b9fe","Type":"ContainerStarted","Data":"c722efc08f52c43950c54e9c1bb4fba4761199fa5359a86183442cfad5d743fd"} Jan 28 17:15:57 crc kubenswrapper[4877]: I0128 17:15:57.079264 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" podStartSLOduration=2.243234333 podStartE2EDuration="3.079225102s" podCreationTimestamp="2026-01-28 17:15:54 +0000 UTC" firstStartedPulling="2026-01-28 17:15:55.175106827 +0000 UTC m=+2458.733433715" lastFinishedPulling="2026-01-28 17:15:56.011097596 +0000 UTC 
m=+2459.569424484" observedRunningTime="2026-01-28 17:15:57.065566034 +0000 UTC m=+2460.623892942" watchObservedRunningTime="2026-01-28 17:15:57.079225102 +0000 UTC m=+2460.637552030" Jan 28 17:15:57 crc kubenswrapper[4877]: I0128 17:15:57.346366 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6998378-c9db-4eea-919e-efb4166feae0" path="/var/lib/kubelet/pods/c6998378-c9db-4eea-919e-efb4166feae0/volumes" Jan 28 17:16:10 crc kubenswrapper[4877]: I0128 17:16:10.330301 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:16:10 crc kubenswrapper[4877]: E0128 17:16:10.332672 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:16:24 crc kubenswrapper[4877]: I0128 17:16:24.331115 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:16:24 crc kubenswrapper[4877]: E0128 17:16:24.331948 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:16:29 crc kubenswrapper[4877]: I0128 17:16:29.048930 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-t55qb"] Jan 28 17:16:29 crc kubenswrapper[4877]: I0128 17:16:29.065021 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-t55qb"] Jan 28 17:16:29 crc kubenswrapper[4877]: I0128 17:16:29.344123 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b5478d7-63fb-41e2-89c8-1d27290a9844" path="/var/lib/kubelet/pods/4b5478d7-63fb-41e2-89c8-1d27290a9844/volumes" Jan 28 17:16:30 crc kubenswrapper[4877]: I0128 17:16:30.198315 4877 scope.go:117] "RemoveContainer" containerID="f0366d9ca67cf0912ad66cb7997b6b6efd6fa01d20752020bd4d5e5173d2a2f5" Jan 28 17:16:37 crc kubenswrapper[4877]: I0128 17:16:37.340278 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:16:37 crc kubenswrapper[4877]: E0128 17:16:37.341239 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:16:42 crc kubenswrapper[4877]: I0128 17:16:42.548418 4877 generic.go:334] "Generic (PLEG): container finished" podID="ba650e8a-baee-48e9-8a3b-a45d1418b9fe" containerID="c722efc08f52c43950c54e9c1bb4fba4761199fa5359a86183442cfad5d743fd" exitCode=0 Jan 28 17:16:42 crc kubenswrapper[4877]: I0128 17:16:42.548546 4877 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" event={"ID":"ba650e8a-baee-48e9-8a3b-a45d1418b9fe","Type":"ContainerDied","Data":"c722efc08f52c43950c54e9c1bb4fba4761199fa5359a86183442cfad5d743fd"} Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.062615 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.197141 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-ssh-key-openstack-edpm-ipam\") pod \"ba650e8a-baee-48e9-8a3b-a45d1418b9fe\" (UID: \"ba650e8a-baee-48e9-8a3b-a45d1418b9fe\") " Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.197301 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-inventory\") pod \"ba650e8a-baee-48e9-8a3b-a45d1418b9fe\" (UID: \"ba650e8a-baee-48e9-8a3b-a45d1418b9fe\") " Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.197398 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz6h4\" (UniqueName: \"kubernetes.io/projected/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-kube-api-access-lz6h4\") pod \"ba650e8a-baee-48e9-8a3b-a45d1418b9fe\" (UID: \"ba650e8a-baee-48e9-8a3b-a45d1418b9fe\") " Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.204157 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-kube-api-access-lz6h4" (OuterVolumeSpecName: "kube-api-access-lz6h4") pod "ba650e8a-baee-48e9-8a3b-a45d1418b9fe" (UID: "ba650e8a-baee-48e9-8a3b-a45d1418b9fe"). InnerVolumeSpecName "kube-api-access-lz6h4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.237331 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-inventory" (OuterVolumeSpecName: "inventory") pod "ba650e8a-baee-48e9-8a3b-a45d1418b9fe" (UID: "ba650e8a-baee-48e9-8a3b-a45d1418b9fe"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.240710 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "ba650e8a-baee-48e9-8a3b-a45d1418b9fe" (UID: "ba650e8a-baee-48e9-8a3b-a45d1418b9fe"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.302038 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.302087 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz6h4\" (UniqueName: \"kubernetes.io/projected/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-kube-api-access-lz6h4\") on node \"crc\" DevicePath \"\"" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.302106 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ba650e8a-baee-48e9-8a3b-a45d1418b9fe-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.570080 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" event={"ID":"ba650e8a-baee-48e9-8a3b-a45d1418b9fe","Type":"ContainerDied","Data":"fb4108908ab1182319837fc312de9c79aa75c9babdd4531fdd2c7cb72c923712"} Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.570136 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fb4108908ab1182319837fc312de9c79aa75c9babdd4531fdd2c7cb72c923712" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.570181 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5t7gb" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.680950 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-n9bp4"] Jan 28 17:16:44 crc kubenswrapper[4877]: E0128 17:16:44.682254 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6998378-c9db-4eea-919e-efb4166feae0" containerName="extract-content" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.682283 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6998378-c9db-4eea-919e-efb4166feae0" containerName="extract-content" Jan 28 17:16:44 crc kubenswrapper[4877]: E0128 17:16:44.682321 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6998378-c9db-4eea-919e-efb4166feae0" containerName="extract-utilities" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.682331 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6998378-c9db-4eea-919e-efb4166feae0" containerName="extract-utilities" Jan 28 17:16:44 crc kubenswrapper[4877]: E0128 17:16:44.682350 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6998378-c9db-4eea-919e-efb4166feae0" containerName="registry-server" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.682359 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6998378-c9db-4eea-919e-efb4166feae0" containerName="registry-server" Jan 28 17:16:44 crc kubenswrapper[4877]: E0128 17:16:44.682391 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba650e8a-baee-48e9-8a3b-a45d1418b9fe" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.682401 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba650e8a-baee-48e9-8a3b-a45d1418b9fe" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.682720 4877 
memory_manager.go:354] "RemoveStaleState removing state" podUID="c6998378-c9db-4eea-919e-efb4166feae0" containerName="registry-server" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.682754 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba650e8a-baee-48e9-8a3b-a45d1418b9fe" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.683792 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.703184 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.704319 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.704355 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.726585 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.748101 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-n9bp4"] Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.845410 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6k8r\" (UniqueName: \"kubernetes.io/projected/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-kube-api-access-r6k8r\") pod \"ssh-known-hosts-edpm-deployment-n9bp4\" (UID: \"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd\") " pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.845719 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-n9bp4\" (UID: \"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd\") " pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.846499 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-n9bp4\" (UID: \"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd\") " pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.954068 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6k8r\" (UniqueName: \"kubernetes.io/projected/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-kube-api-access-r6k8r\") pod \"ssh-known-hosts-edpm-deployment-n9bp4\" (UID: \"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd\") " pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.954160 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-n9bp4\" (UID: \"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd\") " 
pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.954325 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-n9bp4\" (UID: \"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd\") " pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.962342 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-n9bp4\" (UID: \"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd\") " pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.964153 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-n9bp4\" (UID: \"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd\") " pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" Jan 28 17:16:44 crc kubenswrapper[4877]: I0128 17:16:44.972669 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6k8r\" (UniqueName: \"kubernetes.io/projected/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-kube-api-access-r6k8r\") pod \"ssh-known-hosts-edpm-deployment-n9bp4\" (UID: \"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd\") " pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" Jan 28 17:16:45 crc kubenswrapper[4877]: I0128 17:16:45.035554 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" Jan 28 17:16:45 crc kubenswrapper[4877]: I0128 17:16:45.615156 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-n9bp4"] Jan 28 17:16:46 crc kubenswrapper[4877]: I0128 17:16:46.597727 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" event={"ID":"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd","Type":"ContainerStarted","Data":"37c8747e0b39eba59ce9cd7cb0ceadd363e86fdde171189ebe60a359abbd8224"} Jan 28 17:16:46 crc kubenswrapper[4877]: I0128 17:16:46.598053 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" event={"ID":"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd","Type":"ContainerStarted","Data":"4621bae21c6145efca47c8fe318d681d91c5ca5791baf8e563c4f9fb78576790"} Jan 28 17:16:46 crc kubenswrapper[4877]: I0128 17:16:46.626218 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" podStartSLOduration=1.975358853 podStartE2EDuration="2.626198119s" podCreationTimestamp="2026-01-28 17:16:44 +0000 UTC" firstStartedPulling="2026-01-28 17:16:45.605214051 +0000 UTC m=+2509.163540939" lastFinishedPulling="2026-01-28 17:16:46.256053317 +0000 UTC m=+2509.814380205" observedRunningTime="2026-01-28 17:16:46.61435825 +0000 UTC m=+2510.172685138" watchObservedRunningTime="2026-01-28 17:16:46.626198119 +0000 UTC m=+2510.184525007" Jan 28 17:16:48 crc kubenswrapper[4877]: I0128 17:16:48.331562 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:16:48 crc kubenswrapper[4877]: E0128 17:16:48.332379 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:16:53 crc kubenswrapper[4877]: I0128 17:16:53.690409 4877 generic.go:334] "Generic (PLEG): container finished" podID="beeff2ea-e44f-48e8-bc5d-f6f4ae952acd" containerID="37c8747e0b39eba59ce9cd7cb0ceadd363e86fdde171189ebe60a359abbd8224" exitCode=0 Jan 28 17:16:53 crc kubenswrapper[4877]: I0128 17:16:53.690494 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" event={"ID":"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd","Type":"ContainerDied","Data":"37c8747e0b39eba59ce9cd7cb0ceadd363e86fdde171189ebe60a359abbd8224"} Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.191682 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.351330 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-inventory-0\") pod \"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd\" (UID: \"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd\") " Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.351431 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r6k8r\" (UniqueName: \"kubernetes.io/projected/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-kube-api-access-r6k8r\") pod \"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd\" (UID: \"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd\") " Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.351498 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-ssh-key-openstack-edpm-ipam\") pod \"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd\" (UID: \"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd\") " Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.363242 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-kube-api-access-r6k8r" (OuterVolumeSpecName: "kube-api-access-r6k8r") pod "beeff2ea-e44f-48e8-bc5d-f6f4ae952acd" (UID: "beeff2ea-e44f-48e8-bc5d-f6f4ae952acd"). InnerVolumeSpecName "kube-api-access-r6k8r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.386014 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "beeff2ea-e44f-48e8-bc5d-f6f4ae952acd" (UID: "beeff2ea-e44f-48e8-bc5d-f6f4ae952acd"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.393648 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "beeff2ea-e44f-48e8-bc5d-f6f4ae952acd" (UID: "beeff2ea-e44f-48e8-bc5d-f6f4ae952acd"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.456647 4877 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-inventory-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.456680 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r6k8r\" (UniqueName: \"kubernetes.io/projected/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-kube-api-access-r6k8r\") on node \"crc\" DevicePath \"\"" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.456693 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/beeff2ea-e44f-48e8-bc5d-f6f4ae952acd-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.716826 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" event={"ID":"beeff2ea-e44f-48e8-bc5d-f6f4ae952acd","Type":"ContainerDied","Data":"4621bae21c6145efca47c8fe318d681d91c5ca5791baf8e563c4f9fb78576790"} Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.716877 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4621bae21c6145efca47c8fe318d681d91c5ca5791baf8e563c4f9fb78576790" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.716909 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-n9bp4" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.786244 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg"] Jan 28 17:16:55 crc kubenswrapper[4877]: E0128 17:16:55.786991 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="beeff2ea-e44f-48e8-bc5d-f6f4ae952acd" containerName="ssh-known-hosts-edpm-deployment" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.787010 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="beeff2ea-e44f-48e8-bc5d-f6f4ae952acd" containerName="ssh-known-hosts-edpm-deployment" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.787240 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="beeff2ea-e44f-48e8-bc5d-f6f4ae952acd" containerName="ssh-known-hosts-edpm-deployment" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.788186 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.792103 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.792455 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.794872 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.800014 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.809992 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg"] Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.968876 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-2pxhg\" (UID: \"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.969252 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-2pxhg\" (UID: \"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" Jan 28 17:16:55 crc kubenswrapper[4877]: I0128 17:16:55.969458 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7rbr\" (UniqueName: \"kubernetes.io/projected/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-kube-api-access-c7rbr\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-2pxhg\" (UID: \"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" Jan 28 17:16:56 crc kubenswrapper[4877]: I0128 17:16:56.071455 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-2pxhg\" (UID: \"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" Jan 28 17:16:56 crc kubenswrapper[4877]: I0128 17:16:56.071594 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7rbr\" (UniqueName: \"kubernetes.io/projected/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-kube-api-access-c7rbr\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-2pxhg\" (UID: \"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" Jan 28 17:16:56 crc kubenswrapper[4877]: I0128 17:16:56.071728 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-ssh-key-openstack-edpm-ipam\") pod 
\"run-os-edpm-deployment-openstack-edpm-ipam-2pxhg\" (UID: \"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" Jan 28 17:16:56 crc kubenswrapper[4877]: I0128 17:16:56.078167 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-2pxhg\" (UID: \"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" Jan 28 17:16:56 crc kubenswrapper[4877]: I0128 17:16:56.078162 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-2pxhg\" (UID: \"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" Jan 28 17:16:56 crc kubenswrapper[4877]: I0128 17:16:56.106211 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7rbr\" (UniqueName: \"kubernetes.io/projected/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-kube-api-access-c7rbr\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-2pxhg\" (UID: \"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" Jan 28 17:16:56 crc kubenswrapper[4877]: I0128 17:16:56.109205 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" Jan 28 17:16:56 crc kubenswrapper[4877]: I0128 17:16:56.725566 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg"] Jan 28 17:16:57 crc kubenswrapper[4877]: I0128 17:16:57.739253 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" event={"ID":"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a","Type":"ContainerStarted","Data":"22c7d63860e5a89fd71d6783f2ea6c8b14d2e09e18d6828095b9d2c37d789d2f"} Jan 28 17:16:57 crc kubenswrapper[4877]: I0128 17:16:57.739653 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" event={"ID":"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a","Type":"ContainerStarted","Data":"5055b60ce9147e5b340e4495fcc009644d47bdbb37f1d04c38e67690585e05f7"} Jan 28 17:16:57 crc kubenswrapper[4877]: I0128 17:16:57.762164 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" podStartSLOduration=2.339738846 podStartE2EDuration="2.762142434s" podCreationTimestamp="2026-01-28 17:16:55 +0000 UTC" firstStartedPulling="2026-01-28 17:16:56.739101851 +0000 UTC m=+2520.297428739" lastFinishedPulling="2026-01-28 17:16:57.161505439 +0000 UTC m=+2520.719832327" observedRunningTime="2026-01-28 17:16:57.753320747 +0000 UTC m=+2521.311647695" watchObservedRunningTime="2026-01-28 17:16:57.762142434 +0000 UTC m=+2521.320469322" Jan 28 17:16:59 crc kubenswrapper[4877]: I0128 17:16:59.330163 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:16:59 crc kubenswrapper[4877]: E0128 17:16:59.331622 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:17:05 crc kubenswrapper[4877]: I0128 17:17:05.831047 4877 generic.go:334] "Generic (PLEG): container finished" podID="ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a" containerID="22c7d63860e5a89fd71d6783f2ea6c8b14d2e09e18d6828095b9d2c37d789d2f" exitCode=0 Jan 28 17:17:05 crc kubenswrapper[4877]: I0128 17:17:05.831139 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" event={"ID":"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a","Type":"ContainerDied","Data":"22c7d63860e5a89fd71d6783f2ea6c8b14d2e09e18d6828095b9d2c37d789d2f"} Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.390138 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.561362 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-inventory\") pod \"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a\" (UID: \"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a\") " Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.561444 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7rbr\" (UniqueName: \"kubernetes.io/projected/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-kube-api-access-c7rbr\") pod \"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a\" (UID: \"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a\") " Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.561738 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-ssh-key-openstack-edpm-ipam\") pod \"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a\" (UID: \"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a\") " Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.573559 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-kube-api-access-c7rbr" (OuterVolumeSpecName: "kube-api-access-c7rbr") pod "ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a" (UID: "ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a"). InnerVolumeSpecName "kube-api-access-c7rbr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.595625 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a" (UID: "ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.601725 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-inventory" (OuterVolumeSpecName: "inventory") pod "ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a" (UID: "ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.665047 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7rbr\" (UniqueName: \"kubernetes.io/projected/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-kube-api-access-c7rbr\") on node \"crc\" DevicePath \"\"" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.665090 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.665104 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.856120 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" event={"ID":"ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a","Type":"ContainerDied","Data":"5055b60ce9147e5b340e4495fcc009644d47bdbb37f1d04c38e67690585e05f7"} Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.856169 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5055b60ce9147e5b340e4495fcc009644d47bdbb37f1d04c38e67690585e05f7" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.856196 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-2pxhg" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.955168 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67"] Jan 28 17:17:07 crc kubenswrapper[4877]: E0128 17:17:07.955953 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.955977 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.956371 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce33fee3-8cb5-45a1-b3fb-7e6f9fb9405a" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.957280 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.959417 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.960733 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.962032 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.962347 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:17:07 crc kubenswrapper[4877]: I0128 17:17:07.988615 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67"] Jan 28 17:17:08 crc kubenswrapper[4877]: I0128 17:17:08.076894 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67\" (UID: \"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" Jan 28 17:17:08 crc kubenswrapper[4877]: I0128 17:17:08.077018 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67\" (UID: \"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" Jan 28 17:17:08 crc kubenswrapper[4877]: I0128 17:17:08.077052 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65jt8\" (UniqueName: \"kubernetes.io/projected/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-kube-api-access-65jt8\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67\" (UID: \"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" Jan 28 17:17:08 crc kubenswrapper[4877]: I0128 17:17:08.179650 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67\" (UID: \"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" Jan 28 17:17:08 crc kubenswrapper[4877]: I0128 17:17:08.179802 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67\" (UID: \"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" Jan 28 17:17:08 crc kubenswrapper[4877]: I0128 17:17:08.179852 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65jt8\" (UniqueName: \"kubernetes.io/projected/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-kube-api-access-65jt8\") pod 
\"reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67\" (UID: \"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" Jan 28 17:17:08 crc kubenswrapper[4877]: I0128 17:17:08.184295 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67\" (UID: \"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" Jan 28 17:17:08 crc kubenswrapper[4877]: I0128 17:17:08.186401 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67\" (UID: \"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" Jan 28 17:17:08 crc kubenswrapper[4877]: I0128 17:17:08.200294 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65jt8\" (UniqueName: \"kubernetes.io/projected/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-kube-api-access-65jt8\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67\" (UID: \"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" Jan 28 17:17:08 crc kubenswrapper[4877]: I0128 17:17:08.280725 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" Jan 28 17:17:08 crc kubenswrapper[4877]: I0128 17:17:08.891900 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67"] Jan 28 17:17:09 crc kubenswrapper[4877]: I0128 17:17:09.883929 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" event={"ID":"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd","Type":"ContainerStarted","Data":"646c48afc21d8f952b2c79756f659a3d64a3c0d2f0bfb288974869e0da8818c1"} Jan 28 17:17:10 crc kubenswrapper[4877]: I0128 17:17:10.896588 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" event={"ID":"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd","Type":"ContainerStarted","Data":"75c9848b91e500e8f66369ab6af035c31a2ea043ddf8e2d6bee694057b9840e3"} Jan 28 17:17:10 crc kubenswrapper[4877]: I0128 17:17:10.918685 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" podStartSLOduration=3.188779956 podStartE2EDuration="3.918665432s" podCreationTimestamp="2026-01-28 17:17:07 +0000 UTC" firstStartedPulling="2026-01-28 17:17:08.895879071 +0000 UTC m=+2532.454205969" lastFinishedPulling="2026-01-28 17:17:09.625764557 +0000 UTC m=+2533.184091445" observedRunningTime="2026-01-28 17:17:10.910871401 +0000 UTC m=+2534.469198289" watchObservedRunningTime="2026-01-28 17:17:10.918665432 +0000 UTC m=+2534.476992320" Jan 28 17:17:13 crc kubenswrapper[4877]: I0128 17:17:13.330592 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:17:13 crc kubenswrapper[4877]: E0128 17:17:13.331391 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:17:18 crc kubenswrapper[4877]: I0128 17:17:18.974299 4877 generic.go:334] "Generic (PLEG): container finished" podID="8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd" containerID="75c9848b91e500e8f66369ab6af035c31a2ea043ddf8e2d6bee694057b9840e3" exitCode=0 Jan 28 17:17:18 crc kubenswrapper[4877]: I0128 17:17:18.974381 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" event={"ID":"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd","Type":"ContainerDied","Data":"75c9848b91e500e8f66369ab6af035c31a2ea043ddf8e2d6bee694057b9840e3"} Jan 28 17:17:20 crc kubenswrapper[4877]: I0128 17:17:20.468285 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" Jan 28 17:17:20 crc kubenswrapper[4877]: I0128 17:17:20.500004 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-inventory\") pod \"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd\" (UID: \"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd\") " Jan 28 17:17:20 crc kubenswrapper[4877]: I0128 17:17:20.500085 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-65jt8\" (UniqueName: \"kubernetes.io/projected/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-kube-api-access-65jt8\") pod \"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd\" (UID: \"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd\") " Jan 28 17:17:20 crc kubenswrapper[4877]: I0128 17:17:20.500367 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-ssh-key-openstack-edpm-ipam\") pod \"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd\" (UID: \"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd\") " Jan 28 17:17:20 crc kubenswrapper[4877]: I0128 17:17:20.505753 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-kube-api-access-65jt8" (OuterVolumeSpecName: "kube-api-access-65jt8") pod "8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd" (UID: "8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd"). InnerVolumeSpecName "kube-api-access-65jt8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:17:20 crc kubenswrapper[4877]: I0128 17:17:20.532297 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-inventory" (OuterVolumeSpecName: "inventory") pod "8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd" (UID: "8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:17:20 crc kubenswrapper[4877]: I0128 17:17:20.545660 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd" (UID: "8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:17:20 crc kubenswrapper[4877]: I0128 17:17:20.602219 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:17:20 crc kubenswrapper[4877]: I0128 17:17:20.602270 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:17:20 crc kubenswrapper[4877]: I0128 17:17:20.602283 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-65jt8\" (UniqueName: \"kubernetes.io/projected/8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd-kube-api-access-65jt8\") on node \"crc\" DevicePath \"\"" Jan 28 17:17:20 crc kubenswrapper[4877]: I0128 17:17:20.998243 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" event={"ID":"8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd","Type":"ContainerDied","Data":"646c48afc21d8f952b2c79756f659a3d64a3c0d2f0bfb288974869e0da8818c1"} Jan 28 17:17:20 crc kubenswrapper[4877]: I0128 17:17:20.998288 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="646c48afc21d8f952b2c79756f659a3d64a3c0d2f0bfb288974869e0da8818c1" Jan 28 17:17:20 crc kubenswrapper[4877]: I0128 17:17:20.998316 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-bfg67" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.119382 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm"] Jan 28 17:17:21 crc kubenswrapper[4877]: E0128 17:17:21.120784 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.120816 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.121562 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fa5b1a1-6603-46f0-971b-d4ebafa9b8dd" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.123022 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.126498 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.126592 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.126620 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.126794 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.126793 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.126832 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.126913 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.127672 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.129543 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.139673 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm"] Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.317063 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.317127 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.317622 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.317672 4877 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.317718 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.317767 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvs5b\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-kube-api-access-xvs5b\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.317918 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.317956 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.318039 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.318105 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.318142 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"inventory\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.318169 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.318241 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.318299 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.318349 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.318419 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.420980 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.421059 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.421945 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.422017 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.422098 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.422134 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.422176 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.422244 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvs5b\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-kube-api-access-xvs5b\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.422339 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.422368 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.422430 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.422525 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.422560 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.422585 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.422667 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.422736 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.425424 4877 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.425552 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.426588 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.427131 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.427957 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.428522 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.430534 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.436215 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.436273 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.436268 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.438124 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.438238 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.438266 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.438433 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.439120 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:17:21 crc 
Jan 28 17:17:21 crc kubenswrapper[4877]: I0128 17:17:21.451978 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm"
Jan 28 17:17:22 crc kubenswrapper[4877]: I0128 17:17:22.028727 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm"]
Jan 28 17:17:22 crc kubenswrapper[4877]: W0128 17:17:22.035415 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4307a337_2f48_4216_989f_53dc74fb330a.slice/crio-dbd4d314ea7e7a0bcd71347b037732c8c3cdad8465ffc8790a380d03ea19c37d WatchSource:0}: Error finding container dbd4d314ea7e7a0bcd71347b037732c8c3cdad8465ffc8790a380d03ea19c37d: Status 404 returned error can't find the container with id dbd4d314ea7e7a0bcd71347b037732c8c3cdad8465ffc8790a380d03ea19c37d
Jan 28 17:17:23 crc kubenswrapper[4877]: I0128 17:17:23.019783 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" event={"ID":"4307a337-2f48-4216-989f-53dc74fb330a","Type":"ContainerStarted","Data":"dbd4d314ea7e7a0bcd71347b037732c8c3cdad8465ffc8790a380d03ea19c37d"}
Jan 28 17:17:24 crc kubenswrapper[4877]: I0128 17:17:24.030947 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" event={"ID":"4307a337-2f48-4216-989f-53dc74fb330a","Type":"ContainerStarted","Data":"47eacdbfd2eb37417f81aeca83a498b4ed23649840b9748eb480ca72e80cf9dc"}
Jan 28 17:17:24 crc kubenswrapper[4877]: I0128 17:17:24.059804 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" podStartSLOduration=1.582693351 podStartE2EDuration="3.05978689s" podCreationTimestamp="2026-01-28 17:17:21 +0000 UTC" firstStartedPulling="2026-01-28 17:17:22.053976358 +0000 UTC m=+2545.612303246" lastFinishedPulling="2026-01-28 17:17:23.531069897 +0000 UTC m=+2547.089396785" observedRunningTime="2026-01-28 17:17:24.052196716 +0000 UTC m=+2547.610523624" watchObservedRunningTime="2026-01-28 17:17:24.05978689 +0000 UTC m=+2547.618113778"
Jan 28 17:17:28 crc kubenswrapper[4877]: I0128 17:17:28.330671 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3"
Jan 28 17:17:28 crc kubenswrapper[4877]: E0128 17:17:28.331357 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
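The machine-config-daemon container is at its back-off ceiling here: the kubelet refuses the restart and retries only once the 5m0s back-off expires, which is why the next RemoveContainer/ContainerStarted pair appears only at 17:17:41-42 below. A sketch of the schedule, assuming the kubelet's default restart back-off (10s initial delay, doubling per consecutive failure); only the 5m0s cap is stated in the message itself, the base and doubling are assumptions:

    # Assumed parameters: 10s base delay, doubling per failure, 300s (5m0s) cap.
    def backoff_schedule(failures, base=10.0, cap=300.0):
        return [min(base * 2**n, cap) for n in range(failures)]

    print(backoff_schedule(7))  # [10.0, 20.0, 40.0, 80.0, 160.0, 300.0, 300.0] seconds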
containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:17:42 crc kubenswrapper[4877]: I0128 17:17:42.229427 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"0c5ff514a6eff15ca95ea02e7d577a96c1dbadfc956ddd5d6cbffc146f2313db"} Jan 28 17:17:44 crc kubenswrapper[4877]: I0128 17:17:44.050768 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-79fbr"] Jan 28 17:17:44 crc kubenswrapper[4877]: I0128 17:17:44.062826 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-79fbr"] Jan 28 17:17:45 crc kubenswrapper[4877]: I0128 17:17:45.347745 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b6f7515-689a-4a2e-807d-dde5d82975a7" path="/var/lib/kubelet/pods/3b6f7515-689a-4a2e-807d-dde5d82975a7/volumes" Jan 28 17:18:03 crc kubenswrapper[4877]: I0128 17:18:03.486750 4877 generic.go:334] "Generic (PLEG): container finished" podID="4307a337-2f48-4216-989f-53dc74fb330a" containerID="47eacdbfd2eb37417f81aeca83a498b4ed23649840b9748eb480ca72e80cf9dc" exitCode=0 Jan 28 17:18:03 crc kubenswrapper[4877]: I0128 17:18:03.486893 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" event={"ID":"4307a337-2f48-4216-989f-53dc74fb330a","Type":"ContainerDied","Data":"47eacdbfd2eb37417f81aeca83a498b4ed23649840b9748eb480ca72e80cf9dc"} Jan 28 17:18:04 crc kubenswrapper[4877]: I0128 17:18:04.984647 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.142199 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-nova-combined-ca-bundle\") pod \"4307a337-2f48-4216-989f-53dc74fb330a\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.142285 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-inventory\") pod \"4307a337-2f48-4216-989f-53dc74fb330a\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.142471 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-telemetry-power-monitoring-combined-ca-bundle\") pod \"4307a337-2f48-4216-989f-53dc74fb330a\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.143739 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-neutron-metadata-combined-ca-bundle\") pod \"4307a337-2f48-4216-989f-53dc74fb330a\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.143792 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-bootstrap-combined-ca-bundle\") pod \"4307a337-2f48-4216-989f-53dc74fb330a\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.143836 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-telemetry-combined-ca-bundle\") pod \"4307a337-2f48-4216-989f-53dc74fb330a\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.143869 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-ovn-default-certs-0\") pod \"4307a337-2f48-4216-989f-53dc74fb330a\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.143908 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"4307a337-2f48-4216-989f-53dc74fb330a\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.143954 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"4307a337-2f48-4216-989f-53dc74fb330a\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.144050 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-repo-setup-combined-ca-bundle\") pod \"4307a337-2f48-4216-989f-53dc74fb330a\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.144151 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xvs5b\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-kube-api-access-xvs5b\") pod \"4307a337-2f48-4216-989f-53dc74fb330a\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.144206 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"4307a337-2f48-4216-989f-53dc74fb330a\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.144264 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-libvirt-combined-ca-bundle\") pod \"4307a337-2f48-4216-989f-53dc74fb330a\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.144305 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-ssh-key-openstack-edpm-ipam\") pod \"4307a337-2f48-4216-989f-53dc74fb330a\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.144340 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"4307a337-2f48-4216-989f-53dc74fb330a\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.144572 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-ovn-combined-ca-bundle\") pod \"4307a337-2f48-4216-989f-53dc74fb330a\" (UID: \"4307a337-2f48-4216-989f-53dc74fb330a\") " Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.151984 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "4307a337-2f48-4216-989f-53dc74fb330a" (UID: "4307a337-2f48-4216-989f-53dc74fb330a"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.152819 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "4307a337-2f48-4216-989f-53dc74fb330a" (UID: "4307a337-2f48-4216-989f-53dc74fb330a"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.152873 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "4307a337-2f48-4216-989f-53dc74fb330a" (UID: "4307a337-2f48-4216-989f-53dc74fb330a"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.153279 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "4307a337-2f48-4216-989f-53dc74fb330a" (UID: "4307a337-2f48-4216-989f-53dc74fb330a"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.153703 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-kube-api-access-xvs5b" (OuterVolumeSpecName: "kube-api-access-xvs5b") pod "4307a337-2f48-4216-989f-53dc74fb330a" (UID: "4307a337-2f48-4216-989f-53dc74fb330a"). InnerVolumeSpecName "kube-api-access-xvs5b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.154034 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "4307a337-2f48-4216-989f-53dc74fb330a" (UID: "4307a337-2f48-4216-989f-53dc74fb330a"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.156949 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "4307a337-2f48-4216-989f-53dc74fb330a" (UID: "4307a337-2f48-4216-989f-53dc74fb330a"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.157074 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "4307a337-2f48-4216-989f-53dc74fb330a" (UID: "4307a337-2f48-4216-989f-53dc74fb330a"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.158192 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0") pod "4307a337-2f48-4216-989f-53dc74fb330a" (UID: "4307a337-2f48-4216-989f-53dc74fb330a"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.158266 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "4307a337-2f48-4216-989f-53dc74fb330a" (UID: "4307a337-2f48-4216-989f-53dc74fb330a"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.160708 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "4307a337-2f48-4216-989f-53dc74fb330a" (UID: "4307a337-2f48-4216-989f-53dc74fb330a"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.160746 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "4307a337-2f48-4216-989f-53dc74fb330a" (UID: "4307a337-2f48-4216-989f-53dc74fb330a"). 
InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.160863 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "4307a337-2f48-4216-989f-53dc74fb330a" (UID: "4307a337-2f48-4216-989f-53dc74fb330a"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.161075 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "4307a337-2f48-4216-989f-53dc74fb330a" (UID: "4307a337-2f48-4216-989f-53dc74fb330a"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.188328 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-inventory" (OuterVolumeSpecName: "inventory") pod "4307a337-2f48-4216-989f-53dc74fb330a" (UID: "4307a337-2f48-4216-989f-53dc74fb330a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.188828 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "4307a337-2f48-4216-989f-53dc74fb330a" (UID: "4307a337-2f48-4216-989f-53dc74fb330a"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.247932 4877 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.247978 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xvs5b\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-kube-api-access-xvs5b\") on node \"crc\" DevicePath \"\"" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.247994 4877 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.248007 4877 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.248025 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.248039 4877 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.248055 4877 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.248065 4877 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.248075 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.248086 4877 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.248098 4877 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.248145 4877 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.248158 4877 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4307a337-2f48-4216-989f-53dc74fb330a-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.248170 4877 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.248182 4877 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.248196 4877 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4307a337-2f48-4216-989f-53dc74fb330a-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.526793 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" event={"ID":"4307a337-2f48-4216-989f-53dc74fb330a","Type":"ContainerDied","Data":"dbd4d314ea7e7a0bcd71347b037732c8c3cdad8465ffc8790a380d03ea19c37d"} Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.526840 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dbd4d314ea7e7a0bcd71347b037732c8c3cdad8465ffc8790a380d03ea19c37d" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.526935 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-ds6qm" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.628587 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4"] Jan 28 17:18:05 crc kubenswrapper[4877]: E0128 17:18:05.629765 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4307a337-2f48-4216-989f-53dc74fb330a" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.629798 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="4307a337-2f48-4216-989f-53dc74fb330a" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.630133 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="4307a337-2f48-4216-989f-53dc74fb330a" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.631237 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.634856 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.634858 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.635224 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.635392 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.635597 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.645466 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4"] Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.761209 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8vfx4\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.761363 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd2cw\" (UniqueName: \"kubernetes.io/projected/440d2a47-95b1-410b-8356-c3d6d57e4030-kube-api-access-sd2cw\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8vfx4\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.761414 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/440d2a47-95b1-410b-8356-c3d6d57e4030-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8vfx4\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.761433 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8vfx4\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.761463 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8vfx4\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.863914 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-sd2cw\" (UniqueName: \"kubernetes.io/projected/440d2a47-95b1-410b-8356-c3d6d57e4030-kube-api-access-sd2cw\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8vfx4\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.864030 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/440d2a47-95b1-410b-8356-c3d6d57e4030-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8vfx4\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.864054 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8vfx4\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.864100 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8vfx4\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.865040 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8vfx4\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.865069 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/440d2a47-95b1-410b-8356-c3d6d57e4030-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8vfx4\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.870182 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8vfx4\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.870583 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8vfx4\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.872685 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8vfx4\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.883134 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd2cw\" (UniqueName: \"kubernetes.io/projected/440d2a47-95b1-410b-8356-c3d6d57e4030-kube-api-access-sd2cw\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8vfx4\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:05 crc kubenswrapper[4877]: I0128 17:18:05.952313 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:18:07 crc kubenswrapper[4877]: I0128 17:18:06.507271 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4"] Jan 28 17:18:07 crc kubenswrapper[4877]: I0128 17:18:06.538948 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" event={"ID":"440d2a47-95b1-410b-8356-c3d6d57e4030","Type":"ContainerStarted","Data":"3cd3de73ed8c8785d6756ac451d34d36f3943ada971bb0cb380a9996ef952912"} Jan 28 17:18:08 crc kubenswrapper[4877]: I0128 17:18:08.579577 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" event={"ID":"440d2a47-95b1-410b-8356-c3d6d57e4030","Type":"ContainerStarted","Data":"5368188c9312ee32e21a25ad3cdb88e38ca6b81dd679d654df1c3c812e741aa2"} Jan 28 17:18:08 crc kubenswrapper[4877]: I0128 17:18:08.623822 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" podStartSLOduration=2.793306688 podStartE2EDuration="3.623800056s" podCreationTimestamp="2026-01-28 17:18:05 +0000 UTC" firstStartedPulling="2026-01-28 17:18:06.507810623 +0000 UTC m=+2590.066137511" lastFinishedPulling="2026-01-28 17:18:07.338303991 +0000 UTC m=+2590.896630879" observedRunningTime="2026-01-28 17:18:08.598448362 +0000 UTC m=+2592.156775250" watchObservedRunningTime="2026-01-28 17:18:08.623800056 +0000 UTC m=+2592.182126944" Jan 28 17:18:30 crc kubenswrapper[4877]: I0128 17:18:30.326543 4877 scope.go:117] "RemoveContainer" containerID="75d6cd604269124281c2a85b0e721da2d26f8d9277a0181961032ca67f8a0ec5" Jan 28 17:19:07 crc kubenswrapper[4877]: I0128 17:19:07.254700 4877 generic.go:334] "Generic (PLEG): container finished" podID="440d2a47-95b1-410b-8356-c3d6d57e4030" containerID="5368188c9312ee32e21a25ad3cdb88e38ca6b81dd679d654df1c3c812e741aa2" exitCode=0 Jan 28 17:19:07 crc kubenswrapper[4877]: I0128 17:19:07.254794 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" event={"ID":"440d2a47-95b1-410b-8356-c3d6d57e4030","Type":"ContainerDied","Data":"5368188c9312ee32e21a25ad3cdb88e38ca6b81dd679d654df1c3c812e741aa2"} Jan 28 17:19:08 crc kubenswrapper[4877]: I0128 17:19:08.826743 4877 util.go:48] "No ready sandbox for pod can be found. 
Jan 28 17:19:08 crc kubenswrapper[4877]: I0128 17:19:08.999833 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sd2cw\" (UniqueName: \"kubernetes.io/projected/440d2a47-95b1-410b-8356-c3d6d57e4030-kube-api-access-sd2cw\") pod \"440d2a47-95b1-410b-8356-c3d6d57e4030\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") "
Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.000181 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-ovn-combined-ca-bundle\") pod \"440d2a47-95b1-410b-8356-c3d6d57e4030\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") "
Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.000631 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-ssh-key-openstack-edpm-ipam\") pod \"440d2a47-95b1-410b-8356-c3d6d57e4030\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") "
Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.000952 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-inventory\") pod \"440d2a47-95b1-410b-8356-c3d6d57e4030\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") "
Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.001113 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/440d2a47-95b1-410b-8356-c3d6d57e4030-ovncontroller-config-0\") pod \"440d2a47-95b1-410b-8356-c3d6d57e4030\" (UID: \"440d2a47-95b1-410b-8356-c3d6d57e4030\") "
Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.017599 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/440d2a47-95b1-410b-8356-c3d6d57e4030-kube-api-access-sd2cw" (OuterVolumeSpecName: "kube-api-access-sd2cw") pod "440d2a47-95b1-410b-8356-c3d6d57e4030" (UID: "440d2a47-95b1-410b-8356-c3d6d57e4030"). InnerVolumeSpecName "kube-api-access-sd2cw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.017599 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "440d2a47-95b1-410b-8356-c3d6d57e4030" (UID: "440d2a47-95b1-410b-8356-c3d6d57e4030"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.033703 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-inventory" (OuterVolumeSpecName: "inventory") pod "440d2a47-95b1-410b-8356-c3d6d57e4030" (UID: "440d2a47-95b1-410b-8356-c3d6d57e4030"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
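Each volume in the teardown above walks the same three-step sequence: the reconciler logs "operationExecutor.UnmountVolume started", the operation generator confirms "UnmountVolume.TearDown succeeded", and the reconciler later records "Volume detached" once the path is gone. A toy Go model of that ordering, as a reading aid for these logs rather than a reimplementation (kubelet's real logic lives in its volume manager and is considerably more involved):

package main

import "fmt"

// teardownSteps mirrors the per-volume log sequence seen in this section.
var teardownSteps = []string{
	"operationExecutor.UnmountVolume started", // reconciler_common.go:159
	"UnmountVolume.TearDown succeeded",        // operation_generator.go:803
	"Volume detached",                         // reconciler_common.go:293
}

func teardown(volume string) {
	for _, step := range teardownSteps {
		fmt.Printf("%s for volume %q\n", step, volume)
	}
}

func main() {
	// The five volumes of the ovn-edpm-deployment pod above.
	for _, v := range []string{
		"kube-api-access-sd2cw",
		"ovn-combined-ca-bundle",
		"ssh-key-openstack-edpm-ipam",
		"inventory",
		"ovncontroller-config-0",
	} {
		teardown(v)
	}
}

Note one difference from this toy: in the real log the phases interleave across volumes (all the "UnmountVolume started" entries appear first, then the TearDowns, then the detach records), since the reconciler sweeps each phase over every volume rather than finishing one volume at a time.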
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.052633 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/440d2a47-95b1-410b-8356-c3d6d57e4030-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "440d2a47-95b1-410b-8356-c3d6d57e4030" (UID: "440d2a47-95b1-410b-8356-c3d6d57e4030"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.056725 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "440d2a47-95b1-410b-8356-c3d6d57e4030" (UID: "440d2a47-95b1-410b-8356-c3d6d57e4030"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.104457 4877 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.104512 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.104521 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/440d2a47-95b1-410b-8356-c3d6d57e4030-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.104530 4877 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/440d2a47-95b1-410b-8356-c3d6d57e4030-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.104538 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sd2cw\" (UniqueName: \"kubernetes.io/projected/440d2a47-95b1-410b-8356-c3d6d57e4030-kube-api-access-sd2cw\") on node \"crc\" DevicePath \"\"" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.277430 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" event={"ID":"440d2a47-95b1-410b-8356-c3d6d57e4030","Type":"ContainerDied","Data":"3cd3de73ed8c8785d6756ac451d34d36f3943ada971bb0cb380a9996ef952912"} Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.277493 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3cd3de73ed8c8785d6756ac451d34d36f3943ada971bb0cb380a9996ef952912" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.277551 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8vfx4" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.423002 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2"] Jan 28 17:19:09 crc kubenswrapper[4877]: E0128 17:19:09.424431 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440d2a47-95b1-410b-8356-c3d6d57e4030" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.424459 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="440d2a47-95b1-410b-8356-c3d6d57e4030" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.424726 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="440d2a47-95b1-410b-8356-c3d6d57e4030" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.425911 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.432454 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.432577 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.432888 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.433040 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.433250 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.433280 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.460695 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2"] Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.618357 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.618510 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.618581 4877 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.618618 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.618900 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jc4b\" (UniqueName: \"kubernetes.io/projected/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-kube-api-access-5jc4b\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.619005 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.721586 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.721708 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.721806 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.721866 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-nova-metadata-neutron-config-0\") pod 
\"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.721891 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.721948 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jc4b\" (UniqueName: \"kubernetes.io/projected/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-kube-api-access-5jc4b\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.729276 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.729591 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.729673 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.731781 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.734523 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.755781 4877 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jc4b\" (UniqueName: \"kubernetes.io/projected/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-kube-api-access-5jc4b\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:09 crc kubenswrapper[4877]: I0128 17:19:09.757879 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:10 crc kubenswrapper[4877]: I0128 17:19:10.311713 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2"] Jan 28 17:19:10 crc kubenswrapper[4877]: W0128 17:19:10.322704 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod14cf6c10_8e81_4bc7_91ce_5bb9fa39cda5.slice/crio-3bfdb1acf2f813cf159aa0f31fbf8e6f9b7199ab71ffacc89ea4ac1c0dea4bbb WatchSource:0}: Error finding container 3bfdb1acf2f813cf159aa0f31fbf8e6f9b7199ab71ffacc89ea4ac1c0dea4bbb: Status 404 returned error can't find the container with id 3bfdb1acf2f813cf159aa0f31fbf8e6f9b7199ab71ffacc89ea4ac1c0dea4bbb Jan 28 17:19:10 crc kubenswrapper[4877]: I0128 17:19:10.326636 4877 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 17:19:11 crc kubenswrapper[4877]: I0128 17:19:11.300958 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" event={"ID":"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5","Type":"ContainerStarted","Data":"ce7ffe4a1b52e395d0aa206c8e14b70adebd41d0e8a8e3035a6009344f088faa"} Jan 28 17:19:11 crc kubenswrapper[4877]: I0128 17:19:11.301677 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" event={"ID":"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5","Type":"ContainerStarted","Data":"3bfdb1acf2f813cf159aa0f31fbf8e6f9b7199ab71ffacc89ea4ac1c0dea4bbb"} Jan 28 17:19:12 crc kubenswrapper[4877]: I0128 17:19:12.346965 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" podStartSLOduration=2.860023555 podStartE2EDuration="3.346935952s" podCreationTimestamp="2026-01-28 17:19:09 +0000 UTC" firstStartedPulling="2026-01-28 17:19:10.32631566 +0000 UTC m=+2653.884642548" lastFinishedPulling="2026-01-28 17:19:10.813228057 +0000 UTC m=+2654.371554945" observedRunningTime="2026-01-28 17:19:12.333048937 +0000 UTC m=+2655.891375835" watchObservedRunningTime="2026-01-28 17:19:12.346935952 +0000 UTC m=+2655.905262840" Jan 28 17:19:56 crc kubenswrapper[4877]: I0128 17:19:56.799117 4877 generic.go:334] "Generic (PLEG): container finished" podID="14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5" containerID="ce7ffe4a1b52e395d0aa206c8e14b70adebd41d0e8a8e3035a6009344f088faa" exitCode=0 Jan 28 17:19:56 crc kubenswrapper[4877]: I0128 17:19:56.799230 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" event={"ID":"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5","Type":"ContainerDied","Data":"ce7ffe4a1b52e395d0aa206c8e14b70adebd41d0e8a8e3035a6009344f088faa"} Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.315037 4877 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.481548 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-neutron-ovn-metadata-agent-neutron-config-0\") pod \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.481663 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-inventory\") pod \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.481702 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5jc4b\" (UniqueName: \"kubernetes.io/projected/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-kube-api-access-5jc4b\") pod \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.481761 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-ssh-key-openstack-edpm-ipam\") pod \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.481806 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-neutron-metadata-combined-ca-bundle\") pod \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.481862 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-nova-metadata-neutron-config-0\") pod \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\" (UID: \"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5\") " Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.488850 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5" (UID: "14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.489166 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-kube-api-access-5jc4b" (OuterVolumeSpecName: "kube-api-access-5jc4b") pod "14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5" (UID: "14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5"). InnerVolumeSpecName "kube-api-access-5jc4b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.516650 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5" (UID: "14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.517089 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5" (UID: "14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.518115 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-inventory" (OuterVolumeSpecName: "inventory") pod "14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5" (UID: "14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.530099 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5" (UID: "14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.584941 4877 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.584979 4877 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.584992 4877 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.585009 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.585019 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5jc4b\" (UniqueName: \"kubernetes.io/projected/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-kube-api-access-5jc4b\") on node \"crc\" DevicePath \"\"" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.585030 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.821001 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" event={"ID":"14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5","Type":"ContainerDied","Data":"3bfdb1acf2f813cf159aa0f31fbf8e6f9b7199ab71ffacc89ea4ac1c0dea4bbb"} Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.821047 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3bfdb1acf2f813cf159aa0f31fbf8e6f9b7199ab71ffacc89ea4ac1c0dea4bbb" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.821086 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-dfnv2" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.946090 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl"] Jan 28 17:19:58 crc kubenswrapper[4877]: E0128 17:19:58.946577 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.946594 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.946840 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="14cf6c10-8e81-4bc7-91ce-5bb9fa39cda5" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.947681 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.949908 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.950036 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.950065 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.950125 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.950315 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Jan 28 17:19:58 crc kubenswrapper[4877]: I0128 17:19:58.968006 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl"] Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.106774 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-lrftl\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.106866 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-lrftl\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.107118 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgqdf\" (UniqueName: \"kubernetes.io/projected/b4e57b01-768b-4c29-81a7-a7a755401cc6-kube-api-access-fgqdf\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-lrftl\" (UID: 
\"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.107597 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-lrftl\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.107654 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-lrftl\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.209702 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-lrftl\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.209776 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-lrftl\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.209907 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgqdf\" (UniqueName: \"kubernetes.io/projected/b4e57b01-768b-4c29-81a7-a7a755401cc6-kube-api-access-fgqdf\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-lrftl\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.210086 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-lrftl\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.210108 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-lrftl\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.213925 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-lrftl\" (UID: 
\"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.214212 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-lrftl\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.215189 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-lrftl\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.225526 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-lrftl\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.230259 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgqdf\" (UniqueName: \"kubernetes.io/projected/b4e57b01-768b-4c29-81a7-a7a755401cc6-kube-api-access-fgqdf\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-lrftl\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.272031 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:19:59 crc kubenswrapper[4877]: I0128 17:19:59.857926 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl"] Jan 28 17:20:00 crc kubenswrapper[4877]: I0128 17:20:00.848888 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" event={"ID":"b4e57b01-768b-4c29-81a7-a7a755401cc6","Type":"ContainerStarted","Data":"04c70ee48a61cf72e7d6d8d605023078f8d704937cce91285301b0511b393ea9"} Jan 28 17:20:00 crc kubenswrapper[4877]: I0128 17:20:00.849187 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" event={"ID":"b4e57b01-768b-4c29-81a7-a7a755401cc6","Type":"ContainerStarted","Data":"682d5d880adc930e012a8ad91b296fc9b197a4bacd6275fe19459f215f1876a4"} Jan 28 17:20:00 crc kubenswrapper[4877]: I0128 17:20:00.877547 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" podStartSLOduration=2.40650349 podStartE2EDuration="2.877523547s" podCreationTimestamp="2026-01-28 17:19:58 +0000 UTC" firstStartedPulling="2026-01-28 17:19:59.861301952 +0000 UTC m=+2703.419628840" lastFinishedPulling="2026-01-28 17:20:00.332322009 +0000 UTC m=+2703.890648897" observedRunningTime="2026-01-28 17:20:00.867339722 +0000 UTC m=+2704.425666610" watchObservedRunningTime="2026-01-28 17:20:00.877523547 +0000 UTC m=+2704.435850425" Jan 28 17:20:07 crc kubenswrapper[4877]: I0128 17:20:07.076591 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:20:07 crc kubenswrapper[4877]: I0128 17:20:07.077332 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:20:37 crc kubenswrapper[4877]: I0128 17:20:37.076172 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:20:37 crc kubenswrapper[4877]: I0128 17:20:37.077845 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:21:07 crc kubenswrapper[4877]: I0128 17:21:07.076110 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:21:07 crc kubenswrapper[4877]: I0128 17:21:07.077731 4877 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:21:07 crc kubenswrapper[4877]: I0128 17:21:07.077800 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 17:21:07 crc kubenswrapper[4877]: I0128 17:21:07.078930 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0c5ff514a6eff15ca95ea02e7d577a96c1dbadfc956ddd5d6cbffc146f2313db"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 17:21:07 crc kubenswrapper[4877]: I0128 17:21:07.079000 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" containerID="cri-o://0c5ff514a6eff15ca95ea02e7d577a96c1dbadfc956ddd5d6cbffc146f2313db" gracePeriod=600 Jan 28 17:21:07 crc kubenswrapper[4877]: I0128 17:21:07.733936 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="0c5ff514a6eff15ca95ea02e7d577a96c1dbadfc956ddd5d6cbffc146f2313db" exitCode=0 Jan 28 17:21:07 crc kubenswrapper[4877]: I0128 17:21:07.734034 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"0c5ff514a6eff15ca95ea02e7d577a96c1dbadfc956ddd5d6cbffc146f2313db"} Jan 28 17:21:07 crc kubenswrapper[4877]: I0128 17:21:07.734441 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1"} Jan 28 17:21:07 crc kubenswrapper[4877]: I0128 17:21:07.734491 4877 scope.go:117] "RemoveContainer" containerID="d5d95ec48dafcb4c841d32cc32f9a067391365e8329d49474b1b09f614724ef3" Jan 28 17:21:23 crc kubenswrapper[4877]: I0128 17:21:23.388610 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-z6htt"] Jan 28 17:21:23 crc kubenswrapper[4877]: I0128 17:21:23.392927 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:23 crc kubenswrapper[4877]: I0128 17:21:23.402746 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z6htt"] Jan 28 17:21:23 crc kubenswrapper[4877]: I0128 17:21:23.453034 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9f8de57-0eac-41e5-8a3c-a762d143673d-utilities\") pod \"redhat-marketplace-z6htt\" (UID: \"f9f8de57-0eac-41e5-8a3c-a762d143673d\") " pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:23 crc kubenswrapper[4877]: I0128 17:21:23.453316 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hmct\" (UniqueName: \"kubernetes.io/projected/f9f8de57-0eac-41e5-8a3c-a762d143673d-kube-api-access-2hmct\") pod \"redhat-marketplace-z6htt\" (UID: \"f9f8de57-0eac-41e5-8a3c-a762d143673d\") " pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:23 crc kubenswrapper[4877]: I0128 17:21:23.453528 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9f8de57-0eac-41e5-8a3c-a762d143673d-catalog-content\") pod \"redhat-marketplace-z6htt\" (UID: \"f9f8de57-0eac-41e5-8a3c-a762d143673d\") " pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:23 crc kubenswrapper[4877]: I0128 17:21:23.556262 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hmct\" (UniqueName: \"kubernetes.io/projected/f9f8de57-0eac-41e5-8a3c-a762d143673d-kube-api-access-2hmct\") pod \"redhat-marketplace-z6htt\" (UID: \"f9f8de57-0eac-41e5-8a3c-a762d143673d\") " pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:23 crc kubenswrapper[4877]: I0128 17:21:23.556413 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9f8de57-0eac-41e5-8a3c-a762d143673d-catalog-content\") pod \"redhat-marketplace-z6htt\" (UID: \"f9f8de57-0eac-41e5-8a3c-a762d143673d\") " pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:23 crc kubenswrapper[4877]: I0128 17:21:23.556588 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9f8de57-0eac-41e5-8a3c-a762d143673d-utilities\") pod \"redhat-marketplace-z6htt\" (UID: \"f9f8de57-0eac-41e5-8a3c-a762d143673d\") " pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:23 crc kubenswrapper[4877]: I0128 17:21:23.557238 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9f8de57-0eac-41e5-8a3c-a762d143673d-utilities\") pod \"redhat-marketplace-z6htt\" (UID: \"f9f8de57-0eac-41e5-8a3c-a762d143673d\") " pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:23 crc kubenswrapper[4877]: I0128 17:21:23.557265 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9f8de57-0eac-41e5-8a3c-a762d143673d-catalog-content\") pod \"redhat-marketplace-z6htt\" (UID: \"f9f8de57-0eac-41e5-8a3c-a762d143673d\") " pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:23 crc kubenswrapper[4877]: I0128 17:21:23.580656 4877 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-2hmct\" (UniqueName: \"kubernetes.io/projected/f9f8de57-0eac-41e5-8a3c-a762d143673d-kube-api-access-2hmct\") pod \"redhat-marketplace-z6htt\" (UID: \"f9f8de57-0eac-41e5-8a3c-a762d143673d\") " pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:23 crc kubenswrapper[4877]: I0128 17:21:23.732586 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:24 crc kubenswrapper[4877]: I0128 17:21:24.284978 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z6htt"] Jan 28 17:21:24 crc kubenswrapper[4877]: I0128 17:21:24.927416 4877 generic.go:334] "Generic (PLEG): container finished" podID="f9f8de57-0eac-41e5-8a3c-a762d143673d" containerID="d5afcd581aa929c73c42bf70609b4cc6154fc51a4bad38388f1e32c81ec7d146" exitCode=0 Jan 28 17:21:24 crc kubenswrapper[4877]: I0128 17:21:24.927523 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6htt" event={"ID":"f9f8de57-0eac-41e5-8a3c-a762d143673d","Type":"ContainerDied","Data":"d5afcd581aa929c73c42bf70609b4cc6154fc51a4bad38388f1e32c81ec7d146"} Jan 28 17:21:24 crc kubenswrapper[4877]: I0128 17:21:24.927768 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6htt" event={"ID":"f9f8de57-0eac-41e5-8a3c-a762d143673d","Type":"ContainerStarted","Data":"f4e95fe4fd1c7a58cf0d61d9b51c05167a8f0c0b406c6b9cc1a2d19ffd07388a"} Jan 28 17:21:25 crc kubenswrapper[4877]: I0128 17:21:25.942074 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6htt" event={"ID":"f9f8de57-0eac-41e5-8a3c-a762d143673d","Type":"ContainerStarted","Data":"8be8149a49f5658f7093f97e86af8eac16fbae8b5fe7b1ceee7c64312a938af4"} Jan 28 17:21:26 crc kubenswrapper[4877]: I0128 17:21:26.959463 4877 generic.go:334] "Generic (PLEG): container finished" podID="f9f8de57-0eac-41e5-8a3c-a762d143673d" containerID="8be8149a49f5658f7093f97e86af8eac16fbae8b5fe7b1ceee7c64312a938af4" exitCode=0 Jan 28 17:21:26 crc kubenswrapper[4877]: I0128 17:21:26.959552 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6htt" event={"ID":"f9f8de57-0eac-41e5-8a3c-a762d143673d","Type":"ContainerDied","Data":"8be8149a49f5658f7093f97e86af8eac16fbae8b5fe7b1ceee7c64312a938af4"} Jan 28 17:21:27 crc kubenswrapper[4877]: I0128 17:21:27.971783 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6htt" event={"ID":"f9f8de57-0eac-41e5-8a3c-a762d143673d","Type":"ContainerStarted","Data":"17c3bf1afdd8f383c96a13f492c977262b85c2eadb1a57f283b506889b3988d5"} Jan 28 17:21:28 crc kubenswrapper[4877]: I0128 17:21:28.001330 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-z6htt" podStartSLOduration=2.5534053549999998 podStartE2EDuration="5.001307857s" podCreationTimestamp="2026-01-28 17:21:23 +0000 UTC" firstStartedPulling="2026-01-28 17:21:24.929602917 +0000 UTC m=+2788.487929805" lastFinishedPulling="2026-01-28 17:21:27.377505419 +0000 UTC m=+2790.935832307" observedRunningTime="2026-01-28 17:21:27.990448124 +0000 UTC m=+2791.548775012" watchObservedRunningTime="2026-01-28 17:21:28.001307857 +0000 UTC m=+2791.559634745" Jan 28 17:21:33 crc kubenswrapper[4877]: I0128 17:21:33.733299 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:33 crc kubenswrapper[4877]: I0128 17:21:33.733830 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:33 crc kubenswrapper[4877]: I0128 17:21:33.782303 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:34 crc kubenswrapper[4877]: I0128 17:21:34.095253 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:34 crc kubenswrapper[4877]: I0128 17:21:34.550460 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z6htt"] Jan 28 17:21:36 crc kubenswrapper[4877]: I0128 17:21:36.055215 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-z6htt" podUID="f9f8de57-0eac-41e5-8a3c-a762d143673d" containerName="registry-server" containerID="cri-o://17c3bf1afdd8f383c96a13f492c977262b85c2eadb1a57f283b506889b3988d5" gracePeriod=2 Jan 28 17:21:37 crc kubenswrapper[4877]: I0128 17:21:37.069741 4877 generic.go:334] "Generic (PLEG): container finished" podID="f9f8de57-0eac-41e5-8a3c-a762d143673d" containerID="17c3bf1afdd8f383c96a13f492c977262b85c2eadb1a57f283b506889b3988d5" exitCode=0 Jan 28 17:21:37 crc kubenswrapper[4877]: I0128 17:21:37.069803 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6htt" event={"ID":"f9f8de57-0eac-41e5-8a3c-a762d143673d","Type":"ContainerDied","Data":"17c3bf1afdd8f383c96a13f492c977262b85c2eadb1a57f283b506889b3988d5"} Jan 28 17:21:37 crc kubenswrapper[4877]: I0128 17:21:37.502608 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:37 crc kubenswrapper[4877]: I0128 17:21:37.536190 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2hmct\" (UniqueName: \"kubernetes.io/projected/f9f8de57-0eac-41e5-8a3c-a762d143673d-kube-api-access-2hmct\") pod \"f9f8de57-0eac-41e5-8a3c-a762d143673d\" (UID: \"f9f8de57-0eac-41e5-8a3c-a762d143673d\") " Jan 28 17:21:37 crc kubenswrapper[4877]: I0128 17:21:37.536375 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9f8de57-0eac-41e5-8a3c-a762d143673d-catalog-content\") pod \"f9f8de57-0eac-41e5-8a3c-a762d143673d\" (UID: \"f9f8de57-0eac-41e5-8a3c-a762d143673d\") " Jan 28 17:21:37 crc kubenswrapper[4877]: I0128 17:21:37.536428 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9f8de57-0eac-41e5-8a3c-a762d143673d-utilities\") pod \"f9f8de57-0eac-41e5-8a3c-a762d143673d\" (UID: \"f9f8de57-0eac-41e5-8a3c-a762d143673d\") " Jan 28 17:21:37 crc kubenswrapper[4877]: I0128 17:21:37.537772 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9f8de57-0eac-41e5-8a3c-a762d143673d-utilities" (OuterVolumeSpecName: "utilities") pod "f9f8de57-0eac-41e5-8a3c-a762d143673d" (UID: "f9f8de57-0eac-41e5-8a3c-a762d143673d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:21:37 crc kubenswrapper[4877]: I0128 17:21:37.544750 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9f8de57-0eac-41e5-8a3c-a762d143673d-kube-api-access-2hmct" (OuterVolumeSpecName: "kube-api-access-2hmct") pod "f9f8de57-0eac-41e5-8a3c-a762d143673d" (UID: "f9f8de57-0eac-41e5-8a3c-a762d143673d"). InnerVolumeSpecName "kube-api-access-2hmct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:21:37 crc kubenswrapper[4877]: I0128 17:21:37.576174 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9f8de57-0eac-41e5-8a3c-a762d143673d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f9f8de57-0eac-41e5-8a3c-a762d143673d" (UID: "f9f8de57-0eac-41e5-8a3c-a762d143673d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:21:37 crc kubenswrapper[4877]: I0128 17:21:37.639620 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9f8de57-0eac-41e5-8a3c-a762d143673d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:21:37 crc kubenswrapper[4877]: I0128 17:21:37.639651 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9f8de57-0eac-41e5-8a3c-a762d143673d-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:21:37 crc kubenswrapper[4877]: I0128 17:21:37.639661 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2hmct\" (UniqueName: \"kubernetes.io/projected/f9f8de57-0eac-41e5-8a3c-a762d143673d-kube-api-access-2hmct\") on node \"crc\" DevicePath \"\"" Jan 28 17:21:38 crc kubenswrapper[4877]: I0128 17:21:38.083904 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z6htt" event={"ID":"f9f8de57-0eac-41e5-8a3c-a762d143673d","Type":"ContainerDied","Data":"f4e95fe4fd1c7a58cf0d61d9b51c05167a8f0c0b406c6b9cc1a2d19ffd07388a"} Jan 28 17:21:38 crc kubenswrapper[4877]: I0128 17:21:38.083963 4877 scope.go:117] "RemoveContainer" containerID="17c3bf1afdd8f383c96a13f492c977262b85c2eadb1a57f283b506889b3988d5" Jan 28 17:21:38 crc kubenswrapper[4877]: I0128 17:21:38.083967 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z6htt" Jan 28 17:21:38 crc kubenswrapper[4877]: I0128 17:21:38.109832 4877 scope.go:117] "RemoveContainer" containerID="8be8149a49f5658f7093f97e86af8eac16fbae8b5fe7b1ceee7c64312a938af4" Jan 28 17:21:38 crc kubenswrapper[4877]: I0128 17:21:38.124166 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z6htt"] Jan 28 17:21:38 crc kubenswrapper[4877]: I0128 17:21:38.141308 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-z6htt"] Jan 28 17:21:38 crc kubenswrapper[4877]: I0128 17:21:38.152291 4877 scope.go:117] "RemoveContainer" containerID="d5afcd581aa929c73c42bf70609b4cc6154fc51a4bad38388f1e32c81ec7d146" Jan 28 17:21:39 crc kubenswrapper[4877]: I0128 17:21:39.353424 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9f8de57-0eac-41e5-8a3c-a762d143673d" path="/var/lib/kubelet/pods/f9f8de57-0eac-41e5-8a3c-a762d143673d/volumes" Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.629913 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qjc5h"] Jan 28 17:22:42 crc kubenswrapper[4877]: E0128 17:22:42.631598 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9f8de57-0eac-41e5-8a3c-a762d143673d" containerName="registry-server" Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.631619 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9f8de57-0eac-41e5-8a3c-a762d143673d" containerName="registry-server" Jan 28 17:22:42 crc kubenswrapper[4877]: E0128 17:22:42.631632 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9f8de57-0eac-41e5-8a3c-a762d143673d" containerName="extract-utilities" Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.631641 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9f8de57-0eac-41e5-8a3c-a762d143673d" containerName="extract-utilities" Jan 28 17:22:42 crc kubenswrapper[4877]: E0128 17:22:42.631676 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9f8de57-0eac-41e5-8a3c-a762d143673d" containerName="extract-content" Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.631683 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9f8de57-0eac-41e5-8a3c-a762d143673d" containerName="extract-content" Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.631997 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9f8de57-0eac-41e5-8a3c-a762d143673d" containerName="registry-server" Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.634112 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.652155 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qjc5h"] Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.714537 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78c621c3-e709-4efa-9eb0-343cdd9b6440-utilities\") pod \"certified-operators-qjc5h\" (UID: \"78c621c3-e709-4efa-9eb0-343cdd9b6440\") " pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.714711 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfsxh\" (UniqueName: \"kubernetes.io/projected/78c621c3-e709-4efa-9eb0-343cdd9b6440-kube-api-access-gfsxh\") pod \"certified-operators-qjc5h\" (UID: \"78c621c3-e709-4efa-9eb0-343cdd9b6440\") " pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.714906 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78c621c3-e709-4efa-9eb0-343cdd9b6440-catalog-content\") pod \"certified-operators-qjc5h\" (UID: \"78c621c3-e709-4efa-9eb0-343cdd9b6440\") " pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.817820 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78c621c3-e709-4efa-9eb0-343cdd9b6440-utilities\") pod \"certified-operators-qjc5h\" (UID: \"78c621c3-e709-4efa-9eb0-343cdd9b6440\") " pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.818083 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfsxh\" (UniqueName: \"kubernetes.io/projected/78c621c3-e709-4efa-9eb0-343cdd9b6440-kube-api-access-gfsxh\") pod \"certified-operators-qjc5h\" (UID: \"78c621c3-e709-4efa-9eb0-343cdd9b6440\") " pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.818155 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78c621c3-e709-4efa-9eb0-343cdd9b6440-catalog-content\") pod \"certified-operators-qjc5h\" (UID: \"78c621c3-e709-4efa-9eb0-343cdd9b6440\") " pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.818485 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78c621c3-e709-4efa-9eb0-343cdd9b6440-utilities\") pod \"certified-operators-qjc5h\" (UID: \"78c621c3-e709-4efa-9eb0-343cdd9b6440\") " pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.818762 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78c621c3-e709-4efa-9eb0-343cdd9b6440-catalog-content\") pod \"certified-operators-qjc5h\" (UID: \"78c621c3-e709-4efa-9eb0-343cdd9b6440\") " pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.851403 4877 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gfsxh\" (UniqueName: \"kubernetes.io/projected/78c621c3-e709-4efa-9eb0-343cdd9b6440-kube-api-access-gfsxh\") pod \"certified-operators-qjc5h\" (UID: \"78c621c3-e709-4efa-9eb0-343cdd9b6440\") " pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:22:42 crc kubenswrapper[4877]: I0128 17:22:42.975247 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:22:43 crc kubenswrapper[4877]: I0128 17:22:43.590360 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qjc5h"] Jan 28 17:22:43 crc kubenswrapper[4877]: I0128 17:22:43.842459 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qjc5h" event={"ID":"78c621c3-e709-4efa-9eb0-343cdd9b6440","Type":"ContainerStarted","Data":"e605a2404c7b3e53b25f688305962d09d1258f6b2be54c08fa2435a0558b35b4"} Jan 28 17:22:44 crc kubenswrapper[4877]: I0128 17:22:44.870863 4877 generic.go:334] "Generic (PLEG): container finished" podID="78c621c3-e709-4efa-9eb0-343cdd9b6440" containerID="da8c8dfc2212be90733782c19950b08a61253ea10d3581f17215fc40085be356" exitCode=0 Jan 28 17:22:44 crc kubenswrapper[4877]: I0128 17:22:44.871173 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qjc5h" event={"ID":"78c621c3-e709-4efa-9eb0-343cdd9b6440","Type":"ContainerDied","Data":"da8c8dfc2212be90733782c19950b08a61253ea10d3581f17215fc40085be356"} Jan 28 17:22:46 crc kubenswrapper[4877]: I0128 17:22:46.897337 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qjc5h" event={"ID":"78c621c3-e709-4efa-9eb0-343cdd9b6440","Type":"ContainerStarted","Data":"77ea33d00df81bab09e37ef0a9db4cd81e2116d5486c85172aa901958ebb8c85"} Jan 28 17:22:48 crc kubenswrapper[4877]: I0128 17:22:48.920427 4877 generic.go:334] "Generic (PLEG): container finished" podID="78c621c3-e709-4efa-9eb0-343cdd9b6440" containerID="77ea33d00df81bab09e37ef0a9db4cd81e2116d5486c85172aa901958ebb8c85" exitCode=0 Jan 28 17:22:48 crc kubenswrapper[4877]: I0128 17:22:48.920721 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qjc5h" event={"ID":"78c621c3-e709-4efa-9eb0-343cdd9b6440","Type":"ContainerDied","Data":"77ea33d00df81bab09e37ef0a9db4cd81e2116d5486c85172aa901958ebb8c85"} Jan 28 17:22:49 crc kubenswrapper[4877]: I0128 17:22:49.933912 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qjc5h" event={"ID":"78c621c3-e709-4efa-9eb0-343cdd9b6440","Type":"ContainerStarted","Data":"f4f41a729a3dd0275dd159ca52cbbc8a75cd7909373c711356660d66d4b7e300"} Jan 28 17:22:49 crc kubenswrapper[4877]: I0128 17:22:49.963265 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qjc5h" podStartSLOduration=3.449168989 podStartE2EDuration="7.963241242s" podCreationTimestamp="2026-01-28 17:22:42 +0000 UTC" firstStartedPulling="2026-01-28 17:22:44.874617919 +0000 UTC m=+2868.432944807" lastFinishedPulling="2026-01-28 17:22:49.388690172 +0000 UTC m=+2872.947017060" observedRunningTime="2026-01-28 17:22:49.954771394 +0000 UTC m=+2873.513098282" watchObservedRunningTime="2026-01-28 17:22:49.963241242 +0000 UTC m=+2873.521568140" Jan 28 17:22:52 crc kubenswrapper[4877]: I0128 17:22:52.975656 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:22:52 crc kubenswrapper[4877]: I0128 17:22:52.976893 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:22:53 crc kubenswrapper[4877]: I0128 17:22:53.025750 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:23:03 crc kubenswrapper[4877]: I0128 17:23:03.039159 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:23:03 crc kubenswrapper[4877]: I0128 17:23:03.093598 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qjc5h"] Jan 28 17:23:03 crc kubenswrapper[4877]: I0128 17:23:03.093893 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qjc5h" podUID="78c621c3-e709-4efa-9eb0-343cdd9b6440" containerName="registry-server" containerID="cri-o://f4f41a729a3dd0275dd159ca52cbbc8a75cd7909373c711356660d66d4b7e300" gracePeriod=2 Jan 28 17:23:03 crc kubenswrapper[4877]: I0128 17:23:03.729427 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:23:03 crc kubenswrapper[4877]: I0128 17:23:03.792622 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78c621c3-e709-4efa-9eb0-343cdd9b6440-utilities\") pod \"78c621c3-e709-4efa-9eb0-343cdd9b6440\" (UID: \"78c621c3-e709-4efa-9eb0-343cdd9b6440\") " Jan 28 17:23:03 crc kubenswrapper[4877]: I0128 17:23:03.792934 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78c621c3-e709-4efa-9eb0-343cdd9b6440-catalog-content\") pod \"78c621c3-e709-4efa-9eb0-343cdd9b6440\" (UID: \"78c621c3-e709-4efa-9eb0-343cdd9b6440\") " Jan 28 17:23:03 crc kubenswrapper[4877]: I0128 17:23:03.793364 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfsxh\" (UniqueName: \"kubernetes.io/projected/78c621c3-e709-4efa-9eb0-343cdd9b6440-kube-api-access-gfsxh\") pod \"78c621c3-e709-4efa-9eb0-343cdd9b6440\" (UID: \"78c621c3-e709-4efa-9eb0-343cdd9b6440\") " Jan 28 17:23:03 crc kubenswrapper[4877]: I0128 17:23:03.802961 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/78c621c3-e709-4efa-9eb0-343cdd9b6440-utilities" (OuterVolumeSpecName: "utilities") pod "78c621c3-e709-4efa-9eb0-343cdd9b6440" (UID: "78c621c3-e709-4efa-9eb0-343cdd9b6440"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:23:03 crc kubenswrapper[4877]: I0128 17:23:03.803582 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78c621c3-e709-4efa-9eb0-343cdd9b6440-kube-api-access-gfsxh" (OuterVolumeSpecName: "kube-api-access-gfsxh") pod "78c621c3-e709-4efa-9eb0-343cdd9b6440" (UID: "78c621c3-e709-4efa-9eb0-343cdd9b6440"). InnerVolumeSpecName "kube-api-access-gfsxh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:23:03 crc kubenswrapper[4877]: I0128 17:23:03.862014 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/78c621c3-e709-4efa-9eb0-343cdd9b6440-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "78c621c3-e709-4efa-9eb0-343cdd9b6440" (UID: "78c621c3-e709-4efa-9eb0-343cdd9b6440"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:23:03 crc kubenswrapper[4877]: I0128 17:23:03.896979 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78c621c3-e709-4efa-9eb0-343cdd9b6440-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:23:03 crc kubenswrapper[4877]: I0128 17:23:03.897010 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78c621c3-e709-4efa-9eb0-343cdd9b6440-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:23:03 crc kubenswrapper[4877]: I0128 17:23:03.897023 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfsxh\" (UniqueName: \"kubernetes.io/projected/78c621c3-e709-4efa-9eb0-343cdd9b6440-kube-api-access-gfsxh\") on node \"crc\" DevicePath \"\"" Jan 28 17:23:04 crc kubenswrapper[4877]: I0128 17:23:04.081587 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qjc5h" Jan 28 17:23:04 crc kubenswrapper[4877]: I0128 17:23:04.081655 4877 generic.go:334] "Generic (PLEG): container finished" podID="78c621c3-e709-4efa-9eb0-343cdd9b6440" containerID="f4f41a729a3dd0275dd159ca52cbbc8a75cd7909373c711356660d66d4b7e300" exitCode=0 Jan 28 17:23:04 crc kubenswrapper[4877]: I0128 17:23:04.081665 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qjc5h" event={"ID":"78c621c3-e709-4efa-9eb0-343cdd9b6440","Type":"ContainerDied","Data":"f4f41a729a3dd0275dd159ca52cbbc8a75cd7909373c711356660d66d4b7e300"} Jan 28 17:23:04 crc kubenswrapper[4877]: I0128 17:23:04.081723 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qjc5h" event={"ID":"78c621c3-e709-4efa-9eb0-343cdd9b6440","Type":"ContainerDied","Data":"e605a2404c7b3e53b25f688305962d09d1258f6b2be54c08fa2435a0558b35b4"} Jan 28 17:23:04 crc kubenswrapper[4877]: I0128 17:23:04.081745 4877 scope.go:117] "RemoveContainer" containerID="f4f41a729a3dd0275dd159ca52cbbc8a75cd7909373c711356660d66d4b7e300" Jan 28 17:23:04 crc kubenswrapper[4877]: I0128 17:23:04.132053 4877 scope.go:117] "RemoveContainer" containerID="77ea33d00df81bab09e37ef0a9db4cd81e2116d5486c85172aa901958ebb8c85" Jan 28 17:23:04 crc kubenswrapper[4877]: I0128 17:23:04.151722 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qjc5h"] Jan 28 17:23:04 crc kubenswrapper[4877]: I0128 17:23:04.169650 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qjc5h"] Jan 28 17:23:04 crc kubenswrapper[4877]: I0128 17:23:04.183124 4877 scope.go:117] "RemoveContainer" containerID="da8c8dfc2212be90733782c19950b08a61253ea10d3581f17215fc40085be356" Jan 28 17:23:04 crc kubenswrapper[4877]: I0128 17:23:04.250045 4877 scope.go:117] "RemoveContainer" containerID="f4f41a729a3dd0275dd159ca52cbbc8a75cd7909373c711356660d66d4b7e300" Jan 28 17:23:04 crc kubenswrapper[4877]: E0128 17:23:04.250803 4877 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4f41a729a3dd0275dd159ca52cbbc8a75cd7909373c711356660d66d4b7e300\": container with ID starting with f4f41a729a3dd0275dd159ca52cbbc8a75cd7909373c711356660d66d4b7e300 not found: ID does not exist" containerID="f4f41a729a3dd0275dd159ca52cbbc8a75cd7909373c711356660d66d4b7e300" Jan 28 17:23:04 crc kubenswrapper[4877]: I0128 17:23:04.250934 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4f41a729a3dd0275dd159ca52cbbc8a75cd7909373c711356660d66d4b7e300"} err="failed to get container status \"f4f41a729a3dd0275dd159ca52cbbc8a75cd7909373c711356660d66d4b7e300\": rpc error: code = NotFound desc = could not find container \"f4f41a729a3dd0275dd159ca52cbbc8a75cd7909373c711356660d66d4b7e300\": container with ID starting with f4f41a729a3dd0275dd159ca52cbbc8a75cd7909373c711356660d66d4b7e300 not found: ID does not exist" Jan 28 17:23:04 crc kubenswrapper[4877]: I0128 17:23:04.251020 4877 scope.go:117] "RemoveContainer" containerID="77ea33d00df81bab09e37ef0a9db4cd81e2116d5486c85172aa901958ebb8c85" Jan 28 17:23:04 crc kubenswrapper[4877]: E0128 17:23:04.251551 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77ea33d00df81bab09e37ef0a9db4cd81e2116d5486c85172aa901958ebb8c85\": container with ID starting with 77ea33d00df81bab09e37ef0a9db4cd81e2116d5486c85172aa901958ebb8c85 not found: ID does not exist" containerID="77ea33d00df81bab09e37ef0a9db4cd81e2116d5486c85172aa901958ebb8c85" Jan 28 17:23:04 crc kubenswrapper[4877]: I0128 17:23:04.251588 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77ea33d00df81bab09e37ef0a9db4cd81e2116d5486c85172aa901958ebb8c85"} err="failed to get container status \"77ea33d00df81bab09e37ef0a9db4cd81e2116d5486c85172aa901958ebb8c85\": rpc error: code = NotFound desc = could not find container \"77ea33d00df81bab09e37ef0a9db4cd81e2116d5486c85172aa901958ebb8c85\": container with ID starting with 77ea33d00df81bab09e37ef0a9db4cd81e2116d5486c85172aa901958ebb8c85 not found: ID does not exist" Jan 28 17:23:04 crc kubenswrapper[4877]: I0128 17:23:04.251614 4877 scope.go:117] "RemoveContainer" containerID="da8c8dfc2212be90733782c19950b08a61253ea10d3581f17215fc40085be356" Jan 28 17:23:04 crc kubenswrapper[4877]: E0128 17:23:04.252048 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da8c8dfc2212be90733782c19950b08a61253ea10d3581f17215fc40085be356\": container with ID starting with da8c8dfc2212be90733782c19950b08a61253ea10d3581f17215fc40085be356 not found: ID does not exist" containerID="da8c8dfc2212be90733782c19950b08a61253ea10d3581f17215fc40085be356" Jan 28 17:23:04 crc kubenswrapper[4877]: I0128 17:23:04.252091 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da8c8dfc2212be90733782c19950b08a61253ea10d3581f17215fc40085be356"} err="failed to get container status \"da8c8dfc2212be90733782c19950b08a61253ea10d3581f17215fc40085be356\": rpc error: code = NotFound desc = could not find container \"da8c8dfc2212be90733782c19950b08a61253ea10d3581f17215fc40085be356\": container with ID starting with da8c8dfc2212be90733782c19950b08a61253ea10d3581f17215fc40085be356 not found: ID does not exist" Jan 28 17:23:05 crc kubenswrapper[4877]: I0128 17:23:05.346892 4877 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="78c621c3-e709-4efa-9eb0-343cdd9b6440" path="/var/lib/kubelet/pods/78c621c3-e709-4efa-9eb0-343cdd9b6440/volumes" Jan 28 17:23:07 crc kubenswrapper[4877]: I0128 17:23:07.076540 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:23:07 crc kubenswrapper[4877]: I0128 17:23:07.076967 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:23:37 crc kubenswrapper[4877]: I0128 17:23:37.076369 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:23:37 crc kubenswrapper[4877]: I0128 17:23:37.077680 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:23:41 crc kubenswrapper[4877]: I0128 17:23:41.511159 4877 generic.go:334] "Generic (PLEG): container finished" podID="b4e57b01-768b-4c29-81a7-a7a755401cc6" containerID="04c70ee48a61cf72e7d6d8d605023078f8d704937cce91285301b0511b393ea9" exitCode=0 Jan 28 17:23:41 crc kubenswrapper[4877]: I0128 17:23:41.511242 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" event={"ID":"b4e57b01-768b-4c29-81a7-a7a755401cc6","Type":"ContainerDied","Data":"04c70ee48a61cf72e7d6d8d605023078f8d704937cce91285301b0511b393ea9"} Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.000557 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.126317 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-ssh-key-openstack-edpm-ipam\") pod \"b4e57b01-768b-4c29-81a7-a7a755401cc6\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.126381 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgqdf\" (UniqueName: \"kubernetes.io/projected/b4e57b01-768b-4c29-81a7-a7a755401cc6-kube-api-access-fgqdf\") pod \"b4e57b01-768b-4c29-81a7-a7a755401cc6\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.126445 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-libvirt-secret-0\") pod \"b4e57b01-768b-4c29-81a7-a7a755401cc6\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.126490 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-libvirt-combined-ca-bundle\") pod \"b4e57b01-768b-4c29-81a7-a7a755401cc6\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.126565 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-inventory\") pod \"b4e57b01-768b-4c29-81a7-a7a755401cc6\" (UID: \"b4e57b01-768b-4c29-81a7-a7a755401cc6\") " Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.132449 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "b4e57b01-768b-4c29-81a7-a7a755401cc6" (UID: "b4e57b01-768b-4c29-81a7-a7a755401cc6"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.132494 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4e57b01-768b-4c29-81a7-a7a755401cc6-kube-api-access-fgqdf" (OuterVolumeSpecName: "kube-api-access-fgqdf") pod "b4e57b01-768b-4c29-81a7-a7a755401cc6" (UID: "b4e57b01-768b-4c29-81a7-a7a755401cc6"). InnerVolumeSpecName "kube-api-access-fgqdf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.160695 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "b4e57b01-768b-4c29-81a7-a7a755401cc6" (UID: "b4e57b01-768b-4c29-81a7-a7a755401cc6"). InnerVolumeSpecName "libvirt-secret-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.163209 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-inventory" (OuterVolumeSpecName: "inventory") pod "b4e57b01-768b-4c29-81a7-a7a755401cc6" (UID: "b4e57b01-768b-4c29-81a7-a7a755401cc6"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.165115 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "b4e57b01-768b-4c29-81a7-a7a755401cc6" (UID: "b4e57b01-768b-4c29-81a7-a7a755401cc6"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.229186 4877 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.229235 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.229245 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.229254 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgqdf\" (UniqueName: \"kubernetes.io/projected/b4e57b01-768b-4c29-81a7-a7a755401cc6-kube-api-access-fgqdf\") on node \"crc\" DevicePath \"\"" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.229264 4877 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/b4e57b01-768b-4c29-81a7-a7a755401cc6-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.533403 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" event={"ID":"b4e57b01-768b-4c29-81a7-a7a755401cc6","Type":"ContainerDied","Data":"682d5d880adc930e012a8ad91b296fc9b197a4bacd6275fe19459f215f1876a4"} Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.533449 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="682d5d880adc930e012a8ad91b296fc9b197a4bacd6275fe19459f215f1876a4" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.533460 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-lrftl" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.637186 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s"] Jan 28 17:23:43 crc kubenswrapper[4877]: E0128 17:23:43.637905 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78c621c3-e709-4efa-9eb0-343cdd9b6440" containerName="extract-utilities" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.637933 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="78c621c3-e709-4efa-9eb0-343cdd9b6440" containerName="extract-utilities" Jan 28 17:23:43 crc kubenswrapper[4877]: E0128 17:23:43.637962 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78c621c3-e709-4efa-9eb0-343cdd9b6440" containerName="registry-server" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.637971 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="78c621c3-e709-4efa-9eb0-343cdd9b6440" containerName="registry-server" Jan 28 17:23:43 crc kubenswrapper[4877]: E0128 17:23:43.638015 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78c621c3-e709-4efa-9eb0-343cdd9b6440" containerName="extract-content" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.638022 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="78c621c3-e709-4efa-9eb0-343cdd9b6440" containerName="extract-content" Jan 28 17:23:43 crc kubenswrapper[4877]: E0128 17:23:43.638044 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4e57b01-768b-4c29-81a7-a7a755401cc6" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.638083 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4e57b01-768b-4c29-81a7-a7a755401cc6" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.638374 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4e57b01-768b-4c29-81a7-a7a755401cc6" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.638411 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="78c621c3-e709-4efa-9eb0-343cdd9b6440" containerName="registry-server" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.639557 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.642458 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.652252 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.652644 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s"] Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.655213 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.655906 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.656049 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.656207 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.656349 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.755380 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.755779 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.755843 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.755891 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.755925 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5zh2\" (UniqueName: 
\"kubernetes.io/projected/00bd8837-b122-42f8-8e7f-47e40c3e8759-kube-api-access-z5zh2\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.756276 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.756394 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.756507 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.756658 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.859596 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.859790 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.859841 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.859878 4877 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.859909 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.859941 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5zh2\" (UniqueName: \"kubernetes.io/projected/00bd8837-b122-42f8-8e7f-47e40c3e8759-kube-api-access-z5zh2\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.860107 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.860171 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.860215 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.861597 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.865259 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.865300 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" 
(UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.865719 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.871393 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.871460 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.871765 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.872270 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.881272 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5zh2\" (UniqueName: \"kubernetes.io/projected/00bd8837-b122-42f8-8e7f-47e40c3e8759-kube-api-access-z5zh2\") pod \"nova-edpm-deployment-openstack-edpm-ipam-bcp2s\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:43 crc kubenswrapper[4877]: I0128 17:23:43.978918 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:23:44 crc kubenswrapper[4877]: I0128 17:23:44.527730 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s"] Jan 28 17:23:44 crc kubenswrapper[4877]: W0128 17:23:44.529179 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod00bd8837_b122_42f8_8e7f_47e40c3e8759.slice/crio-e37b31f81a755a03c3515855f71985eb9b4405cab33a1661c189c886c18eae58 WatchSource:0}: Error finding container e37b31f81a755a03c3515855f71985eb9b4405cab33a1661c189c886c18eae58: Status 404 returned error can't find the container with id e37b31f81a755a03c3515855f71985eb9b4405cab33a1661c189c886c18eae58 Jan 28 17:23:44 crc kubenswrapper[4877]: I0128 17:23:44.555946 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" event={"ID":"00bd8837-b122-42f8-8e7f-47e40c3e8759","Type":"ContainerStarted","Data":"e37b31f81a755a03c3515855f71985eb9b4405cab33a1661c189c886c18eae58"} Jan 28 17:23:45 crc kubenswrapper[4877]: I0128 17:23:45.570516 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" event={"ID":"00bd8837-b122-42f8-8e7f-47e40c3e8759","Type":"ContainerStarted","Data":"d9953ac4fe745768932b9d632b50a1dc894cbf3d3d08eaeed0c3affdc7120cf7"} Jan 28 17:23:45 crc kubenswrapper[4877]: I0128 17:23:45.608350 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" podStartSLOduration=2.044824605 podStartE2EDuration="2.608327147s" podCreationTimestamp="2026-01-28 17:23:43 +0000 UTC" firstStartedPulling="2026-01-28 17:23:44.531634449 +0000 UTC m=+2928.089961337" lastFinishedPulling="2026-01-28 17:23:45.095136991 +0000 UTC m=+2928.653463879" observedRunningTime="2026-01-28 17:23:45.592277443 +0000 UTC m=+2929.150604341" watchObservedRunningTime="2026-01-28 17:23:45.608327147 +0000 UTC m=+2929.166654035" Jan 28 17:24:07 crc kubenswrapper[4877]: I0128 17:24:07.076327 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:24:07 crc kubenswrapper[4877]: I0128 17:24:07.077048 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:24:07 crc kubenswrapper[4877]: I0128 17:24:07.077119 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 17:24:07 crc kubenswrapper[4877]: I0128 17:24:07.078347 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 17:24:07 crc kubenswrapper[4877]: 
Jan 28 17:24:07 crc kubenswrapper[4877]: E0128 17:24:07.227915 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:24:07 crc kubenswrapper[4877]: I0128 17:24:07.855017 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" exitCode=0
Jan 28 17:24:07 crc kubenswrapper[4877]: I0128 17:24:07.855069 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1"}
Jan 28 17:24:07 crc kubenswrapper[4877]: I0128 17:24:07.855110 4877 scope.go:117] "RemoveContainer" containerID="0c5ff514a6eff15ca95ea02e7d577a96c1dbadfc956ddd5d6cbffc146f2313db"
Jan 28 17:24:07 crc kubenswrapper[4877]: I0128 17:24:07.856198 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1"
Jan 28 17:24:07 crc kubenswrapper[4877]: E0128 17:24:07.856819 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:24:18 crc kubenswrapper[4877]: I0128 17:24:18.331051 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1"
Jan 28 17:24:18 crc kubenswrapper[4877]: E0128 17:24:18.332112 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:24:32 crc kubenswrapper[4877]: I0128 17:24:32.331639 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1"
Jan 28 17:24:32 crc kubenswrapper[4877]: E0128 17:24:32.332608 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:24:43 crc kubenswrapper[4877]: I0128 17:24:43.333813 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1"
Jan 28 17:24:43 crc kubenswrapper[4877]: E0128 17:24:43.335315 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:24:54 crc kubenswrapper[4877]: I0128 17:24:54.330369 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1"
Jan 28 17:24:54 crc kubenswrapper[4877]: E0128 17:24:54.332281 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:25:05 crc kubenswrapper[4877]: I0128 17:25:05.331615 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1"
Jan 28 17:25:05 crc kubenswrapper[4877]: E0128 17:25:05.332441 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:25:16 crc kubenswrapper[4877]: I0128 17:25:16.330731 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1"
Jan 28 17:25:16 crc kubenswrapper[4877]: E0128 17:25:16.331623 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:25:29 crc kubenswrapper[4877]: I0128 17:25:29.330676 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1"
Jan 28 17:25:29 crc kubenswrapper[4877]: E0128 17:25:29.331700 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:25:29 crc kubenswrapper[4877]: I0128 17:25:29.675334 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-s8fsq"]
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-s8fsq"] Jan 28 17:25:29 crc kubenswrapper[4877]: I0128 17:25:29.678964 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s8fsq" Jan 28 17:25:29 crc kubenswrapper[4877]: I0128 17:25:29.690395 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s8fsq"] Jan 28 17:25:29 crc kubenswrapper[4877]: I0128 17:25:29.780922 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/357d7739-8ef7-40ff-99a9-8968923fbc93-utilities\") pod \"community-operators-s8fsq\" (UID: \"357d7739-8ef7-40ff-99a9-8968923fbc93\") " pod="openshift-marketplace/community-operators-s8fsq" Jan 28 17:25:29 crc kubenswrapper[4877]: I0128 17:25:29.780988 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/357d7739-8ef7-40ff-99a9-8968923fbc93-catalog-content\") pod \"community-operators-s8fsq\" (UID: \"357d7739-8ef7-40ff-99a9-8968923fbc93\") " pod="openshift-marketplace/community-operators-s8fsq" Jan 28 17:25:29 crc kubenswrapper[4877]: I0128 17:25:29.781133 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpmkl\" (UniqueName: \"kubernetes.io/projected/357d7739-8ef7-40ff-99a9-8968923fbc93-kube-api-access-kpmkl\") pod \"community-operators-s8fsq\" (UID: \"357d7739-8ef7-40ff-99a9-8968923fbc93\") " pod="openshift-marketplace/community-operators-s8fsq" Jan 28 17:25:29 crc kubenswrapper[4877]: I0128 17:25:29.883353 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpmkl\" (UniqueName: \"kubernetes.io/projected/357d7739-8ef7-40ff-99a9-8968923fbc93-kube-api-access-kpmkl\") pod \"community-operators-s8fsq\" (UID: \"357d7739-8ef7-40ff-99a9-8968923fbc93\") " pod="openshift-marketplace/community-operators-s8fsq" Jan 28 17:25:29 crc kubenswrapper[4877]: I0128 17:25:29.883564 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/357d7739-8ef7-40ff-99a9-8968923fbc93-utilities\") pod \"community-operators-s8fsq\" (UID: \"357d7739-8ef7-40ff-99a9-8968923fbc93\") " pod="openshift-marketplace/community-operators-s8fsq" Jan 28 17:25:29 crc kubenswrapper[4877]: I0128 17:25:29.883614 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/357d7739-8ef7-40ff-99a9-8968923fbc93-catalog-content\") pod \"community-operators-s8fsq\" (UID: \"357d7739-8ef7-40ff-99a9-8968923fbc93\") " pod="openshift-marketplace/community-operators-s8fsq" Jan 28 17:25:29 crc kubenswrapper[4877]: I0128 17:25:29.884278 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/357d7739-8ef7-40ff-99a9-8968923fbc93-utilities\") pod \"community-operators-s8fsq\" (UID: \"357d7739-8ef7-40ff-99a9-8968923fbc93\") " pod="openshift-marketplace/community-operators-s8fsq" Jan 28 17:25:29 crc kubenswrapper[4877]: I0128 17:25:29.884277 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/357d7739-8ef7-40ff-99a9-8968923fbc93-catalog-content\") pod 
\"community-operators-s8fsq\" (UID: \"357d7739-8ef7-40ff-99a9-8968923fbc93\") " pod="openshift-marketplace/community-operators-s8fsq" Jan 28 17:25:29 crc kubenswrapper[4877]: I0128 17:25:29.907233 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpmkl\" (UniqueName: \"kubernetes.io/projected/357d7739-8ef7-40ff-99a9-8968923fbc93-kube-api-access-kpmkl\") pod \"community-operators-s8fsq\" (UID: \"357d7739-8ef7-40ff-99a9-8968923fbc93\") " pod="openshift-marketplace/community-operators-s8fsq" Jan 28 17:25:30 crc kubenswrapper[4877]: I0128 17:25:30.014728 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s8fsq" Jan 28 17:25:30 crc kubenswrapper[4877]: I0128 17:25:30.587081 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s8fsq"] Jan 28 17:25:30 crc kubenswrapper[4877]: W0128 17:25:30.590397 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod357d7739_8ef7_40ff_99a9_8968923fbc93.slice/crio-6d54bba09ce261edf8a47cc9af62752cc6b0a2af4c83bea2bf5b95f213ff7328 WatchSource:0}: Error finding container 6d54bba09ce261edf8a47cc9af62752cc6b0a2af4c83bea2bf5b95f213ff7328: Status 404 returned error can't find the container with id 6d54bba09ce261edf8a47cc9af62752cc6b0a2af4c83bea2bf5b95f213ff7328 Jan 28 17:25:30 crc kubenswrapper[4877]: I0128 17:25:30.715298 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8fsq" event={"ID":"357d7739-8ef7-40ff-99a9-8968923fbc93","Type":"ContainerStarted","Data":"6d54bba09ce261edf8a47cc9af62752cc6b0a2af4c83bea2bf5b95f213ff7328"} Jan 28 17:25:31 crc kubenswrapper[4877]: I0128 17:25:31.726592 4877 generic.go:334] "Generic (PLEG): container finished" podID="357d7739-8ef7-40ff-99a9-8968923fbc93" containerID="bea3eecd2be5f9204d7f12025248ca666d7f799ef93032618decb8527241bd2e" exitCode=0 Jan 28 17:25:31 crc kubenswrapper[4877]: I0128 17:25:31.726684 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8fsq" event={"ID":"357d7739-8ef7-40ff-99a9-8968923fbc93","Type":"ContainerDied","Data":"bea3eecd2be5f9204d7f12025248ca666d7f799ef93032618decb8527241bd2e"} Jan 28 17:25:31 crc kubenswrapper[4877]: I0128 17:25:31.728657 4877 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 17:25:44 crc kubenswrapper[4877]: I0128 17:25:44.332127 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:25:44 crc kubenswrapper[4877]: E0128 17:25:44.333098 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:25:48 crc kubenswrapper[4877]: I0128 17:25:48.937745 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8fsq" event={"ID":"357d7739-8ef7-40ff-99a9-8968923fbc93","Type":"ContainerStarted","Data":"cfa5419af827872f76e44c5d2c4b09988f7da67b3c34f3edc9283bbe27d2ec4d"} Jan 28 17:25:49 crc kubenswrapper[4877]: I0128 
Jan 28 17:25:49 crc kubenswrapper[4877]: I0128 17:25:49.952886 4877 generic.go:334] "Generic (PLEG): container finished" podID="357d7739-8ef7-40ff-99a9-8968923fbc93" containerID="cfa5419af827872f76e44c5d2c4b09988f7da67b3c34f3edc9283bbe27d2ec4d" exitCode=0
Jan 28 17:25:49 crc kubenswrapper[4877]: I0128 17:25:49.953057 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8fsq" event={"ID":"357d7739-8ef7-40ff-99a9-8968923fbc93","Type":"ContainerDied","Data":"cfa5419af827872f76e44c5d2c4b09988f7da67b3c34f3edc9283bbe27d2ec4d"}
Jan 28 17:25:53 crc kubenswrapper[4877]: I0128 17:25:53.996392 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8fsq" event={"ID":"357d7739-8ef7-40ff-99a9-8968923fbc93","Type":"ContainerStarted","Data":"dca7543dffdc924eabf6fd5ce679528a50a54b05a84274ca3ddb18f0f6439ca0"}
Jan 28 17:25:54 crc kubenswrapper[4877]: I0128 17:25:54.026141 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-s8fsq" podStartSLOduration=3.488747252 podStartE2EDuration="25.026115864s" podCreationTimestamp="2026-01-28 17:25:29 +0000 UTC" firstStartedPulling="2026-01-28 17:25:31.728340604 +0000 UTC m=+3035.286667482" lastFinishedPulling="2026-01-28 17:25:53.265709206 +0000 UTC m=+3056.824036094" observedRunningTime="2026-01-28 17:25:54.016165505 +0000 UTC m=+3057.574492393" watchObservedRunningTime="2026-01-28 17:25:54.026115864 +0000 UTC m=+3057.584442752"
Jan 28 17:25:55 crc kubenswrapper[4877]: I0128 17:25:55.331885 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1"
Jan 28 17:25:55 crc kubenswrapper[4877]: E0128 17:25:55.332707 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:25:55 crc kubenswrapper[4877]: I0128 17:25:55.509751 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cznqq"]
Jan 28 17:25:55 crc kubenswrapper[4877]: I0128 17:25:55.512047 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cznqq"
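The pod_startup_latency_tracker entry above is internally consistent: podStartE2EDuration (25.026115864s) is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration (3.488747252) comes out to the E2E duration minus the image-pull window (lastFinishedPulling minus firstStartedPulling, using the monotonic m=+ readings), since the startup SLO excludes pull time. A worked check in Go, with the numbers copied from the entry:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// watchObservedRunningTime - podCreationTimestamp, from the entry.
    	e2e := 25026115864 * time.Nanosecond
    	// Pull window from the monotonic clock readings (m=+...) in the entry.
    	firstStartedPulling := 3035.286667482
    	lastFinishedPulling := 3056.824036094
    	pull := time.Duration((lastFinishedPulling - firstStartedPulling) * float64(time.Second))
    	// Prints ~3.488747252s, matching podStartSLOduration.
    	fmt.Println("podStartSLOduration =", e2e-pull)
    }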
Need to start a new one" pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:25:55 crc kubenswrapper[4877]: I0128 17:25:55.528779 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cznqq"] Jan 28 17:25:55 crc kubenswrapper[4877]: I0128 17:25:55.576795 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-utilities\") pod \"redhat-operators-cznqq\" (UID: \"7bab1e5b-ced0-41c1-8abc-2319ad2d6051\") " pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:25:55 crc kubenswrapper[4877]: I0128 17:25:55.577278 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-catalog-content\") pod \"redhat-operators-cznqq\" (UID: \"7bab1e5b-ced0-41c1-8abc-2319ad2d6051\") " pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:25:55 crc kubenswrapper[4877]: I0128 17:25:55.577332 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6mz5\" (UniqueName: \"kubernetes.io/projected/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-kube-api-access-q6mz5\") pod \"redhat-operators-cznqq\" (UID: \"7bab1e5b-ced0-41c1-8abc-2319ad2d6051\") " pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:25:55 crc kubenswrapper[4877]: I0128 17:25:55.679893 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-catalog-content\") pod \"redhat-operators-cznqq\" (UID: \"7bab1e5b-ced0-41c1-8abc-2319ad2d6051\") " pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:25:55 crc kubenswrapper[4877]: I0128 17:25:55.679982 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6mz5\" (UniqueName: \"kubernetes.io/projected/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-kube-api-access-q6mz5\") pod \"redhat-operators-cznqq\" (UID: \"7bab1e5b-ced0-41c1-8abc-2319ad2d6051\") " pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:25:55 crc kubenswrapper[4877]: I0128 17:25:55.680124 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-utilities\") pod \"redhat-operators-cznqq\" (UID: \"7bab1e5b-ced0-41c1-8abc-2319ad2d6051\") " pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:25:55 crc kubenswrapper[4877]: I0128 17:25:55.680802 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-catalog-content\") pod \"redhat-operators-cznqq\" (UID: \"7bab1e5b-ced0-41c1-8abc-2319ad2d6051\") " pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:25:55 crc kubenswrapper[4877]: I0128 17:25:55.680952 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-utilities\") pod \"redhat-operators-cznqq\" (UID: \"7bab1e5b-ced0-41c1-8abc-2319ad2d6051\") " pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:25:55 crc kubenswrapper[4877]: I0128 17:25:55.703454 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-q6mz5\" (UniqueName: \"kubernetes.io/projected/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-kube-api-access-q6mz5\") pod \"redhat-operators-cznqq\" (UID: \"7bab1e5b-ced0-41c1-8abc-2319ad2d6051\") " pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:25:55 crc kubenswrapper[4877]: I0128 17:25:55.839599 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:25:56 crc kubenswrapper[4877]: I0128 17:25:56.435384 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cznqq"] Jan 28 17:25:57 crc kubenswrapper[4877]: I0128 17:25:57.035296 4877 generic.go:334] "Generic (PLEG): container finished" podID="7bab1e5b-ced0-41c1-8abc-2319ad2d6051" containerID="af21dda25d5ff2b96e7f87102d58c25157da93a537a8aa706b98fdff29dbea94" exitCode=0 Jan 28 17:25:57 crc kubenswrapper[4877]: I0128 17:25:57.035356 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cznqq" event={"ID":"7bab1e5b-ced0-41c1-8abc-2319ad2d6051","Type":"ContainerDied","Data":"af21dda25d5ff2b96e7f87102d58c25157da93a537a8aa706b98fdff29dbea94"} Jan 28 17:25:57 crc kubenswrapper[4877]: I0128 17:25:57.035932 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cznqq" event={"ID":"7bab1e5b-ced0-41c1-8abc-2319ad2d6051","Type":"ContainerStarted","Data":"ab487019d8346d487857322b324c61b1b0738dd59d3e397c1c2afdd9798c36ba"} Jan 28 17:25:59 crc kubenswrapper[4877]: I0128 17:25:59.062147 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cznqq" event={"ID":"7bab1e5b-ced0-41c1-8abc-2319ad2d6051","Type":"ContainerStarted","Data":"0381fe87e7f0c1d13d80ec09542212489009c3ad6a2eea0603d9f1114b31da46"} Jan 28 17:26:00 crc kubenswrapper[4877]: I0128 17:26:00.016133 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-s8fsq" Jan 28 17:26:00 crc kubenswrapper[4877]: I0128 17:26:00.016711 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-s8fsq" Jan 28 17:26:00 crc kubenswrapper[4877]: I0128 17:26:00.076708 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-s8fsq" Jan 28 17:26:00 crc kubenswrapper[4877]: I0128 17:26:00.144700 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-s8fsq" Jan 28 17:26:00 crc kubenswrapper[4877]: I0128 17:26:00.723920 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s8fsq"] Jan 28 17:26:00 crc kubenswrapper[4877]: I0128 17:26:00.900791 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-f5czb"] Jan 28 17:26:00 crc kubenswrapper[4877]: I0128 17:26:00.901071 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-f5czb" podUID="ce950113-65ba-405a-806e-72b77961e39b" containerName="registry-server" containerID="cri-o://133e7341fa50aa8eeabcd51985051d28974ce1e991f62c87d77b8c71ea41ec2f" gracePeriod=2 Jan 28 17:26:01 crc kubenswrapper[4877]: I0128 17:26:01.111634 4877 generic.go:334] "Generic (PLEG): container finished" podID="ce950113-65ba-405a-806e-72b77961e39b" 
containerID="133e7341fa50aa8eeabcd51985051d28974ce1e991f62c87d77b8c71ea41ec2f" exitCode=0 Jan 28 17:26:01 crc kubenswrapper[4877]: I0128 17:26:01.111720 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f5czb" event={"ID":"ce950113-65ba-405a-806e-72b77961e39b","Type":"ContainerDied","Data":"133e7341fa50aa8eeabcd51985051d28974ce1e991f62c87d77b8c71ea41ec2f"} Jan 28 17:26:01 crc kubenswrapper[4877]: I0128 17:26:01.586159 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f5czb" Jan 28 17:26:01 crc kubenswrapper[4877]: I0128 17:26:01.653613 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce950113-65ba-405a-806e-72b77961e39b-utilities\") pod \"ce950113-65ba-405a-806e-72b77961e39b\" (UID: \"ce950113-65ba-405a-806e-72b77961e39b\") " Jan 28 17:26:01 crc kubenswrapper[4877]: I0128 17:26:01.653671 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dznp5\" (UniqueName: \"kubernetes.io/projected/ce950113-65ba-405a-806e-72b77961e39b-kube-api-access-dznp5\") pod \"ce950113-65ba-405a-806e-72b77961e39b\" (UID: \"ce950113-65ba-405a-806e-72b77961e39b\") " Jan 28 17:26:01 crc kubenswrapper[4877]: I0128 17:26:01.654002 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce950113-65ba-405a-806e-72b77961e39b-catalog-content\") pod \"ce950113-65ba-405a-806e-72b77961e39b\" (UID: \"ce950113-65ba-405a-806e-72b77961e39b\") " Jan 28 17:26:01 crc kubenswrapper[4877]: I0128 17:26:01.658290 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce950113-65ba-405a-806e-72b77961e39b-utilities" (OuterVolumeSpecName: "utilities") pod "ce950113-65ba-405a-806e-72b77961e39b" (UID: "ce950113-65ba-405a-806e-72b77961e39b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:26:01 crc kubenswrapper[4877]: I0128 17:26:01.664836 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce950113-65ba-405a-806e-72b77961e39b-kube-api-access-dznp5" (OuterVolumeSpecName: "kube-api-access-dznp5") pod "ce950113-65ba-405a-806e-72b77961e39b" (UID: "ce950113-65ba-405a-806e-72b77961e39b"). InnerVolumeSpecName "kube-api-access-dznp5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:26:01 crc kubenswrapper[4877]: I0128 17:26:01.758955 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce950113-65ba-405a-806e-72b77961e39b-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:26:01 crc kubenswrapper[4877]: I0128 17:26:01.759333 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dznp5\" (UniqueName: \"kubernetes.io/projected/ce950113-65ba-405a-806e-72b77961e39b-kube-api-access-dznp5\") on node \"crc\" DevicePath \"\"" Jan 28 17:26:01 crc kubenswrapper[4877]: I0128 17:26:01.771683 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce950113-65ba-405a-806e-72b77961e39b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ce950113-65ba-405a-806e-72b77961e39b" (UID: "ce950113-65ba-405a-806e-72b77961e39b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:26:01 crc kubenswrapper[4877]: I0128 17:26:01.863219 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce950113-65ba-405a-806e-72b77961e39b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:26:02 crc kubenswrapper[4877]: I0128 17:26:02.131226 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f5czb" Jan 28 17:26:02 crc kubenswrapper[4877]: I0128 17:26:02.132800 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f5czb" event={"ID":"ce950113-65ba-405a-806e-72b77961e39b","Type":"ContainerDied","Data":"455cef043fda95500046c60ce638bfd5359f950a1bcdaff33195c669eb320a05"} Jan 28 17:26:02 crc kubenswrapper[4877]: I0128 17:26:02.132887 4877 scope.go:117] "RemoveContainer" containerID="133e7341fa50aa8eeabcd51985051d28974ce1e991f62c87d77b8c71ea41ec2f" Jan 28 17:26:02 crc kubenswrapper[4877]: I0128 17:26:02.179674 4877 scope.go:117] "RemoveContainer" containerID="5f8f87e41e94af1b0654fea7f75ed0399abe283f06efc1a83310324985776b85" Jan 28 17:26:02 crc kubenswrapper[4877]: I0128 17:26:02.183520 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-f5czb"] Jan 28 17:26:02 crc kubenswrapper[4877]: I0128 17:26:02.197568 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-f5czb"] Jan 28 17:26:02 crc kubenswrapper[4877]: I0128 17:26:02.223792 4877 scope.go:117] "RemoveContainer" containerID="bf1be9479ae9d8148ec4b81dff63a785ce7d235b3f4242763bf99bd2d48e6f88" Jan 28 17:26:03 crc kubenswrapper[4877]: I0128 17:26:03.347347 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce950113-65ba-405a-806e-72b77961e39b" path="/var/lib/kubelet/pods/ce950113-65ba-405a-806e-72b77961e39b/volumes" Jan 28 17:26:04 crc kubenswrapper[4877]: I0128 17:26:04.157982 4877 generic.go:334] "Generic (PLEG): container finished" podID="00bd8837-b122-42f8-8e7f-47e40c3e8759" containerID="d9953ac4fe745768932b9d632b50a1dc894cbf3d3d08eaeed0c3affdc7120cf7" exitCode=0 Jan 28 17:26:04 crc kubenswrapper[4877]: I0128 17:26:04.158073 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" event={"ID":"00bd8837-b122-42f8-8e7f-47e40c3e8759","Type":"ContainerDied","Data":"d9953ac4fe745768932b9d632b50a1dc894cbf3d3d08eaeed0c3affdc7120cf7"} Jan 28 17:26:05 crc kubenswrapper[4877]: I0128 17:26:05.804824 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" Jan 28 17:26:05 crc kubenswrapper[4877]: I0128 17:26:05.896010 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5zh2\" (UniqueName: \"kubernetes.io/projected/00bd8837-b122-42f8-8e7f-47e40c3e8759-kube-api-access-z5zh2\") pod \"00bd8837-b122-42f8-8e7f-47e40c3e8759\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " Jan 28 17:26:05 crc kubenswrapper[4877]: I0128 17:26:05.896205 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-migration-ssh-key-1\") pod \"00bd8837-b122-42f8-8e7f-47e40c3e8759\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " Jan 28 17:26:05 crc kubenswrapper[4877]: I0128 17:26:05.896360 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-ssh-key-openstack-edpm-ipam\") pod \"00bd8837-b122-42f8-8e7f-47e40c3e8759\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " Jan 28 17:26:05 crc kubenswrapper[4877]: I0128 17:26:05.896423 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-cell1-compute-config-0\") pod \"00bd8837-b122-42f8-8e7f-47e40c3e8759\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " Jan 28 17:26:05 crc kubenswrapper[4877]: I0128 17:26:05.896575 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-combined-ca-bundle\") pod \"00bd8837-b122-42f8-8e7f-47e40c3e8759\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " Jan 28 17:26:05 crc kubenswrapper[4877]: I0128 17:26:05.903958 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "00bd8837-b122-42f8-8e7f-47e40c3e8759" (UID: "00bd8837-b122-42f8-8e7f-47e40c3e8759"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:26:05 crc kubenswrapper[4877]: I0128 17:26:05.904078 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00bd8837-b122-42f8-8e7f-47e40c3e8759-kube-api-access-z5zh2" (OuterVolumeSpecName: "kube-api-access-z5zh2") pod "00bd8837-b122-42f8-8e7f-47e40c3e8759" (UID: "00bd8837-b122-42f8-8e7f-47e40c3e8759"). InnerVolumeSpecName "kube-api-access-z5zh2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:26:05 crc kubenswrapper[4877]: I0128 17:26:05.932541 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "00bd8837-b122-42f8-8e7f-47e40c3e8759" (UID: "00bd8837-b122-42f8-8e7f-47e40c3e8759"). InnerVolumeSpecName "nova-migration-ssh-key-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:26:05 crc kubenswrapper[4877]: I0128 17:26:05.933012 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "00bd8837-b122-42f8-8e7f-47e40c3e8759" (UID: "00bd8837-b122-42f8-8e7f-47e40c3e8759"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:26:05 crc kubenswrapper[4877]: I0128 17:26:05.934737 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "00bd8837-b122-42f8-8e7f-47e40c3e8759" (UID: "00bd8837-b122-42f8-8e7f-47e40c3e8759"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.000833 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-migration-ssh-key-0\") pod \"00bd8837-b122-42f8-8e7f-47e40c3e8759\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.000902 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-cell1-compute-config-1\") pod \"00bd8837-b122-42f8-8e7f-47e40c3e8759\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.001208 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-inventory\") pod \"00bd8837-b122-42f8-8e7f-47e40c3e8759\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.001257 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-extra-config-0\") pod \"00bd8837-b122-42f8-8e7f-47e40c3e8759\" (UID: \"00bd8837-b122-42f8-8e7f-47e40c3e8759\") " Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.001940 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.001964 4877 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.001974 4877 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.001984 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5zh2\" (UniqueName: 
\"kubernetes.io/projected/00bd8837-b122-42f8-8e7f-47e40c3e8759-kube-api-access-z5zh2\") on node \"crc\" DevicePath \"\"" Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.002000 4877 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.067761 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "00bd8837-b122-42f8-8e7f-47e40c3e8759" (UID: "00bd8837-b122-42f8-8e7f-47e40c3e8759"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.071012 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-inventory" (OuterVolumeSpecName: "inventory") pod "00bd8837-b122-42f8-8e7f-47e40c3e8759" (UID: "00bd8837-b122-42f8-8e7f-47e40c3e8759"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.071688 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "00bd8837-b122-42f8-8e7f-47e40c3e8759" (UID: "00bd8837-b122-42f8-8e7f-47e40c3e8759"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.079569 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "00bd8837-b122-42f8-8e7f-47e40c3e8759" (UID: "00bd8837-b122-42f8-8e7f-47e40c3e8759"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.105702 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.105768 4877 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.105786 4877 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.105799 4877 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/00bd8837-b122-42f8-8e7f-47e40c3e8759-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.183837 4877 util.go:48] "No ready sandbox for pod can be found. 
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.183861 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-bcp2s" event={"ID":"00bd8837-b122-42f8-8e7f-47e40c3e8759","Type":"ContainerDied","Data":"e37b31f81a755a03c3515855f71985eb9b4405cab33a1661c189c886c18eae58"}
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.184350 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e37b31f81a755a03c3515855f71985eb9b4405cab33a1661c189c886c18eae58"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.185962 4877 generic.go:334] "Generic (PLEG): container finished" podID="7bab1e5b-ced0-41c1-8abc-2319ad2d6051" containerID="0381fe87e7f0c1d13d80ec09542212489009c3ad6a2eea0603d9f1114b31da46" exitCode=0
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.185991 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cznqq" event={"ID":"7bab1e5b-ced0-41c1-8abc-2319ad2d6051","Type":"ContainerDied","Data":"0381fe87e7f0c1d13d80ec09542212489009c3ad6a2eea0603d9f1114b31da46"}
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.296960 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"]
Jan 28 17:26:06 crc kubenswrapper[4877]: E0128 17:26:06.297626 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce950113-65ba-405a-806e-72b77961e39b" containerName="registry-server"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.297648 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce950113-65ba-405a-806e-72b77961e39b" containerName="registry-server"
Jan 28 17:26:06 crc kubenswrapper[4877]: E0128 17:26:06.297696 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce950113-65ba-405a-806e-72b77961e39b" containerName="extract-content"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.297705 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce950113-65ba-405a-806e-72b77961e39b" containerName="extract-content"
Jan 28 17:26:06 crc kubenswrapper[4877]: E0128 17:26:06.297720 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00bd8837-b122-42f8-8e7f-47e40c3e8759" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.297730 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="00bd8837-b122-42f8-8e7f-47e40c3e8759" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Jan 28 17:26:06 crc kubenswrapper[4877]: E0128 17:26:06.297749 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce950113-65ba-405a-806e-72b77961e39b" containerName="extract-utilities"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.297758 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce950113-65ba-405a-806e-72b77961e39b" containerName="extract-utilities"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.298078 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="00bd8837-b122-42f8-8e7f-47e40c3e8759" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.298123 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce950113-65ba-405a-806e-72b77961e39b" containerName="registry-server"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.301147 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.305416 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.305696 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.307538 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.308029 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.308050 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.314817 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.314909 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.314980 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.315053 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txcg5\" (UniqueName: \"kubernetes.io/projected/4459106c-3297-4111-a2e8-5671b7525909-kube-api-access-txcg5\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.315099 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.315234 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.315303 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.320333 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"]
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.418007 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.418381 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.418525 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.418564 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.418657 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txcg5\" (UniqueName: \"kubernetes.io/projected/4459106c-3297-4111-a2e8-5671b7525909-kube-api-access-txcg5\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.418752 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.418883 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.423836 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.424236 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.424390 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.425087 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.429000 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.431199 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.438280 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txcg5\" (UniqueName: \"kubernetes.io/projected/4459106c-3297-4111-a2e8-5671b7525909-kube-api-access-txcg5\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"
succeeded for volume \"kube-api-access-txcg5\" (UniqueName: \"kubernetes.io/projected/4459106c-3297-4111-a2e8-5671b7525909-kube-api-access-txcg5\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8whqk\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk" Jan 28 17:26:06 crc kubenswrapper[4877]: I0128 17:26:06.628601 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk" Jan 28 17:26:07 crc kubenswrapper[4877]: I0128 17:26:07.205124 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cznqq" event={"ID":"7bab1e5b-ced0-41c1-8abc-2319ad2d6051","Type":"ContainerStarted","Data":"4732fa9261cc3d68a11a4d58fc42cc426ecfb605bf96b8e880d0b90f5ad6524f"} Jan 28 17:26:07 crc kubenswrapper[4877]: I0128 17:26:07.249467 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cznqq" podStartSLOduration=2.568000015 podStartE2EDuration="12.2494443s" podCreationTimestamp="2026-01-28 17:25:55 +0000 UTC" firstStartedPulling="2026-01-28 17:25:57.037364526 +0000 UTC m=+3060.595691414" lastFinishedPulling="2026-01-28 17:26:06.718808821 +0000 UTC m=+3070.277135699" observedRunningTime="2026-01-28 17:26:07.241637849 +0000 UTC m=+3070.799964757" watchObservedRunningTime="2026-01-28 17:26:07.2494443 +0000 UTC m=+3070.807771188" Jan 28 17:26:07 crc kubenswrapper[4877]: I0128 17:26:07.326005 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk"] Jan 28 17:26:07 crc kubenswrapper[4877]: W0128 17:26:07.336722 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4459106c_3297_4111_a2e8_5671b7525909.slice/crio-bcda2b510c1e5a0738f0748715469c75e3dca325ee48b6cfc371fc6b2b0819bc WatchSource:0}: Error finding container bcda2b510c1e5a0738f0748715469c75e3dca325ee48b6cfc371fc6b2b0819bc: Status 404 returned error can't find the container with id bcda2b510c1e5a0738f0748715469c75e3dca325ee48b6cfc371fc6b2b0819bc Jan 28 17:26:07 crc kubenswrapper[4877]: I0128 17:26:07.349655 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:26:07 crc kubenswrapper[4877]: E0128 17:26:07.350012 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:26:08 crc kubenswrapper[4877]: I0128 17:26:08.222146 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk" event={"ID":"4459106c-3297-4111-a2e8-5671b7525909","Type":"ContainerStarted","Data":"bcda2b510c1e5a0738f0748715469c75e3dca325ee48b6cfc371fc6b2b0819bc"} Jan 28 17:26:09 crc kubenswrapper[4877]: I0128 17:26:09.232722 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk" 
event={"ID":"4459106c-3297-4111-a2e8-5671b7525909","Type":"ContainerStarted","Data":"105c4bfaebf5194d3ce4b81b25e29d460760dd56ec067a5ceef8127aba845871"} Jan 28 17:26:09 crc kubenswrapper[4877]: I0128 17:26:09.251963 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk" podStartSLOduration=2.651386194 podStartE2EDuration="3.251944232s" podCreationTimestamp="2026-01-28 17:26:06 +0000 UTC" firstStartedPulling="2026-01-28 17:26:07.347794378 +0000 UTC m=+3070.906121256" lastFinishedPulling="2026-01-28 17:26:07.948352406 +0000 UTC m=+3071.506679294" observedRunningTime="2026-01-28 17:26:09.247739469 +0000 UTC m=+3072.806066357" watchObservedRunningTime="2026-01-28 17:26:09.251944232 +0000 UTC m=+3072.810271120" Jan 28 17:26:15 crc kubenswrapper[4877]: I0128 17:26:15.840058 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:26:15 crc kubenswrapper[4877]: I0128 17:26:15.840707 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:26:16 crc kubenswrapper[4877]: I0128 17:26:16.903576 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-cznqq" podUID="7bab1e5b-ced0-41c1-8abc-2319ad2d6051" containerName="registry-server" probeResult="failure" output=< Jan 28 17:26:16 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:26:16 crc kubenswrapper[4877]: > Jan 28 17:26:20 crc kubenswrapper[4877]: I0128 17:26:20.331334 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:26:20 crc kubenswrapper[4877]: E0128 17:26:20.332185 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:26:26 crc kubenswrapper[4877]: I0128 17:26:26.890128 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-cznqq" podUID="7bab1e5b-ced0-41c1-8abc-2319ad2d6051" containerName="registry-server" probeResult="failure" output=< Jan 28 17:26:26 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:26:26 crc kubenswrapper[4877]: > Jan 28 17:26:31 crc kubenswrapper[4877]: I0128 17:26:31.330522 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:26:31 crc kubenswrapper[4877]: E0128 17:26:31.331335 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:26:35 crc kubenswrapper[4877]: I0128 17:26:35.982652 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:26:36 
crc kubenswrapper[4877]: I0128 17:26:36.040811 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:26:36 crc kubenswrapper[4877]: I0128 17:26:36.223374 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cznqq"] Jan 28 17:26:37 crc kubenswrapper[4877]: I0128 17:26:37.946935 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cznqq" podUID="7bab1e5b-ced0-41c1-8abc-2319ad2d6051" containerName="registry-server" containerID="cri-o://4732fa9261cc3d68a11a4d58fc42cc426ecfb605bf96b8e880d0b90f5ad6524f" gracePeriod=2 Jan 28 17:26:38 crc kubenswrapper[4877]: I0128 17:26:38.483651 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:26:38 crc kubenswrapper[4877]: I0128 17:26:38.563507 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-utilities\") pod \"7bab1e5b-ced0-41c1-8abc-2319ad2d6051\" (UID: \"7bab1e5b-ced0-41c1-8abc-2319ad2d6051\") " Jan 28 17:26:38 crc kubenswrapper[4877]: I0128 17:26:38.563729 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6mz5\" (UniqueName: \"kubernetes.io/projected/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-kube-api-access-q6mz5\") pod \"7bab1e5b-ced0-41c1-8abc-2319ad2d6051\" (UID: \"7bab1e5b-ced0-41c1-8abc-2319ad2d6051\") " Jan 28 17:26:38 crc kubenswrapper[4877]: I0128 17:26:38.564009 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-catalog-content\") pod \"7bab1e5b-ced0-41c1-8abc-2319ad2d6051\" (UID: \"7bab1e5b-ced0-41c1-8abc-2319ad2d6051\") " Jan 28 17:26:38 crc kubenswrapper[4877]: I0128 17:26:38.564204 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-utilities" (OuterVolumeSpecName: "utilities") pod "7bab1e5b-ced0-41c1-8abc-2319ad2d6051" (UID: "7bab1e5b-ced0-41c1-8abc-2319ad2d6051"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:26:38 crc kubenswrapper[4877]: I0128 17:26:38.564715 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:26:38 crc kubenswrapper[4877]: I0128 17:26:38.577811 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-kube-api-access-q6mz5" (OuterVolumeSpecName: "kube-api-access-q6mz5") pod "7bab1e5b-ced0-41c1-8abc-2319ad2d6051" (UID: "7bab1e5b-ced0-41c1-8abc-2319ad2d6051"). InnerVolumeSpecName "kube-api-access-q6mz5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:26:38 crc kubenswrapper[4877]: I0128 17:26:38.666676 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6mz5\" (UniqueName: \"kubernetes.io/projected/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-kube-api-access-q6mz5\") on node \"crc\" DevicePath \"\"" Jan 28 17:26:38 crc kubenswrapper[4877]: I0128 17:26:38.699916 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7bab1e5b-ced0-41c1-8abc-2319ad2d6051" (UID: "7bab1e5b-ced0-41c1-8abc-2319ad2d6051"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:26:38 crc kubenswrapper[4877]: I0128 17:26:38.769074 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bab1e5b-ced0-41c1-8abc-2319ad2d6051-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:26:38 crc kubenswrapper[4877]: I0128 17:26:38.958963 4877 generic.go:334] "Generic (PLEG): container finished" podID="7bab1e5b-ced0-41c1-8abc-2319ad2d6051" containerID="4732fa9261cc3d68a11a4d58fc42cc426ecfb605bf96b8e880d0b90f5ad6524f" exitCode=0 Jan 28 17:26:38 crc kubenswrapper[4877]: I0128 17:26:38.959027 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cznqq" event={"ID":"7bab1e5b-ced0-41c1-8abc-2319ad2d6051","Type":"ContainerDied","Data":"4732fa9261cc3d68a11a4d58fc42cc426ecfb605bf96b8e880d0b90f5ad6524f"} Jan 28 17:26:38 crc kubenswrapper[4877]: I0128 17:26:38.959100 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cznqq" event={"ID":"7bab1e5b-ced0-41c1-8abc-2319ad2d6051","Type":"ContainerDied","Data":"ab487019d8346d487857322b324c61b1b0738dd59d3e397c1c2afdd9798c36ba"} Jan 28 17:26:38 crc kubenswrapper[4877]: I0128 17:26:38.959115 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cznqq" Jan 28 17:26:38 crc kubenswrapper[4877]: I0128 17:26:38.959128 4877 scope.go:117] "RemoveContainer" containerID="4732fa9261cc3d68a11a4d58fc42cc426ecfb605bf96b8e880d0b90f5ad6524f" Jan 28 17:26:38 crc kubenswrapper[4877]: I0128 17:26:38.984842 4877 scope.go:117] "RemoveContainer" containerID="0381fe87e7f0c1d13d80ec09542212489009c3ad6a2eea0603d9f1114b31da46" Jan 28 17:26:39 crc kubenswrapper[4877]: I0128 17:26:39.000531 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cznqq"] Jan 28 17:26:39 crc kubenswrapper[4877]: I0128 17:26:39.017768 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cznqq"] Jan 28 17:26:39 crc kubenswrapper[4877]: I0128 17:26:39.019985 4877 scope.go:117] "RemoveContainer" containerID="af21dda25d5ff2b96e7f87102d58c25157da93a537a8aa706b98fdff29dbea94" Jan 28 17:26:39 crc kubenswrapper[4877]: I0128 17:26:39.076753 4877 scope.go:117] "RemoveContainer" containerID="4732fa9261cc3d68a11a4d58fc42cc426ecfb605bf96b8e880d0b90f5ad6524f" Jan 28 17:26:39 crc kubenswrapper[4877]: E0128 17:26:39.077657 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4732fa9261cc3d68a11a4d58fc42cc426ecfb605bf96b8e880d0b90f5ad6524f\": container with ID starting with 4732fa9261cc3d68a11a4d58fc42cc426ecfb605bf96b8e880d0b90f5ad6524f not found: ID does not exist" containerID="4732fa9261cc3d68a11a4d58fc42cc426ecfb605bf96b8e880d0b90f5ad6524f" Jan 28 17:26:39 crc kubenswrapper[4877]: I0128 17:26:39.077715 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4732fa9261cc3d68a11a4d58fc42cc426ecfb605bf96b8e880d0b90f5ad6524f"} err="failed to get container status \"4732fa9261cc3d68a11a4d58fc42cc426ecfb605bf96b8e880d0b90f5ad6524f\": rpc error: code = NotFound desc = could not find container \"4732fa9261cc3d68a11a4d58fc42cc426ecfb605bf96b8e880d0b90f5ad6524f\": container with ID starting with 4732fa9261cc3d68a11a4d58fc42cc426ecfb605bf96b8e880d0b90f5ad6524f not found: ID does not exist" Jan 28 17:26:39 crc kubenswrapper[4877]: I0128 17:26:39.077747 4877 scope.go:117] "RemoveContainer" containerID="0381fe87e7f0c1d13d80ec09542212489009c3ad6a2eea0603d9f1114b31da46" Jan 28 17:26:39 crc kubenswrapper[4877]: E0128 17:26:39.078218 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0381fe87e7f0c1d13d80ec09542212489009c3ad6a2eea0603d9f1114b31da46\": container with ID starting with 0381fe87e7f0c1d13d80ec09542212489009c3ad6a2eea0603d9f1114b31da46 not found: ID does not exist" containerID="0381fe87e7f0c1d13d80ec09542212489009c3ad6a2eea0603d9f1114b31da46" Jan 28 17:26:39 crc kubenswrapper[4877]: I0128 17:26:39.078277 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0381fe87e7f0c1d13d80ec09542212489009c3ad6a2eea0603d9f1114b31da46"} err="failed to get container status \"0381fe87e7f0c1d13d80ec09542212489009c3ad6a2eea0603d9f1114b31da46\": rpc error: code = NotFound desc = could not find container \"0381fe87e7f0c1d13d80ec09542212489009c3ad6a2eea0603d9f1114b31da46\": container with ID starting with 0381fe87e7f0c1d13d80ec09542212489009c3ad6a2eea0603d9f1114b31da46 not found: ID does not exist" Jan 28 17:26:39 crc kubenswrapper[4877]: I0128 17:26:39.078324 4877 scope.go:117] "RemoveContainer" 
containerID="af21dda25d5ff2b96e7f87102d58c25157da93a537a8aa706b98fdff29dbea94" Jan 28 17:26:39 crc kubenswrapper[4877]: E0128 17:26:39.078803 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af21dda25d5ff2b96e7f87102d58c25157da93a537a8aa706b98fdff29dbea94\": container with ID starting with af21dda25d5ff2b96e7f87102d58c25157da93a537a8aa706b98fdff29dbea94 not found: ID does not exist" containerID="af21dda25d5ff2b96e7f87102d58c25157da93a537a8aa706b98fdff29dbea94" Jan 28 17:26:39 crc kubenswrapper[4877]: I0128 17:26:39.078834 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af21dda25d5ff2b96e7f87102d58c25157da93a537a8aa706b98fdff29dbea94"} err="failed to get container status \"af21dda25d5ff2b96e7f87102d58c25157da93a537a8aa706b98fdff29dbea94\": rpc error: code = NotFound desc = could not find container \"af21dda25d5ff2b96e7f87102d58c25157da93a537a8aa706b98fdff29dbea94\": container with ID starting with af21dda25d5ff2b96e7f87102d58c25157da93a537a8aa706b98fdff29dbea94 not found: ID does not exist" Jan 28 17:26:39 crc kubenswrapper[4877]: I0128 17:26:39.342918 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bab1e5b-ced0-41c1-8abc-2319ad2d6051" path="/var/lib/kubelet/pods/7bab1e5b-ced0-41c1-8abc-2319ad2d6051/volumes" Jan 28 17:26:43 crc kubenswrapper[4877]: I0128 17:26:43.331026 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:26:43 crc kubenswrapper[4877]: E0128 17:26:43.331972 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:26:58 crc kubenswrapper[4877]: I0128 17:26:58.330543 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:26:58 crc kubenswrapper[4877]: E0128 17:26:58.331285 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:27:09 crc kubenswrapper[4877]: I0128 17:27:09.330659 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:27:09 crc kubenswrapper[4877]: E0128 17:27:09.331634 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:27:22 crc kubenswrapper[4877]: I0128 17:27:22.331309 4877 scope.go:117] "RemoveContainer" 
containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:27:22 crc kubenswrapper[4877]: E0128 17:27:22.332425 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:27:33 crc kubenswrapper[4877]: I0128 17:27:33.335738 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:27:33 crc kubenswrapper[4877]: E0128 17:27:33.338052 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:27:47 crc kubenswrapper[4877]: I0128 17:27:47.338950 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:27:47 crc kubenswrapper[4877]: E0128 17:27:47.339919 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:27:58 crc kubenswrapper[4877]: I0128 17:27:58.330722 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:27:58 crc kubenswrapper[4877]: E0128 17:27:58.331638 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:28:09 crc kubenswrapper[4877]: I0128 17:28:09.332719 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:28:09 crc kubenswrapper[4877]: E0128 17:28:09.334276 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:28:22 crc kubenswrapper[4877]: I0128 17:28:22.333668 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:28:22 crc kubenswrapper[4877]: E0128 17:28:22.335099 4877 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:28:24 crc kubenswrapper[4877]: I0128 17:28:24.213256 4877 generic.go:334] "Generic (PLEG): container finished" podID="4459106c-3297-4111-a2e8-5671b7525909" containerID="105c4bfaebf5194d3ce4b81b25e29d460760dd56ec067a5ceef8127aba845871" exitCode=0 Jan 28 17:28:24 crc kubenswrapper[4877]: I0128 17:28:24.213360 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk" event={"ID":"4459106c-3297-4111-a2e8-5671b7525909","Type":"ContainerDied","Data":"105c4bfaebf5194d3ce4b81b25e29d460760dd56ec067a5ceef8127aba845871"} Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.754091 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk" Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.873895 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-txcg5\" (UniqueName: \"kubernetes.io/projected/4459106c-3297-4111-a2e8-5671b7525909-kube-api-access-txcg5\") pod \"4459106c-3297-4111-a2e8-5671b7525909\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.874061 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-2\") pod \"4459106c-3297-4111-a2e8-5671b7525909\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.874085 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ssh-key-openstack-edpm-ipam\") pod \"4459106c-3297-4111-a2e8-5671b7525909\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.874141 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-1\") pod \"4459106c-3297-4111-a2e8-5671b7525909\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.874173 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-telemetry-combined-ca-bundle\") pod \"4459106c-3297-4111-a2e8-5671b7525909\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.874208 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-inventory\") pod \"4459106c-3297-4111-a2e8-5671b7525909\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.874268 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-0\") pod \"4459106c-3297-4111-a2e8-5671b7525909\" (UID: \"4459106c-3297-4111-a2e8-5671b7525909\") " Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.898725 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4459106c-3297-4111-a2e8-5671b7525909-kube-api-access-txcg5" (OuterVolumeSpecName: "kube-api-access-txcg5") pod "4459106c-3297-4111-a2e8-5671b7525909" (UID: "4459106c-3297-4111-a2e8-5671b7525909"). InnerVolumeSpecName "kube-api-access-txcg5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.899506 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "4459106c-3297-4111-a2e8-5671b7525909" (UID: "4459106c-3297-4111-a2e8-5671b7525909"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.913379 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-inventory" (OuterVolumeSpecName: "inventory") pod "4459106c-3297-4111-a2e8-5671b7525909" (UID: "4459106c-3297-4111-a2e8-5671b7525909"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.913903 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "4459106c-3297-4111-a2e8-5671b7525909" (UID: "4459106c-3297-4111-a2e8-5671b7525909"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.918127 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "4459106c-3297-4111-a2e8-5671b7525909" (UID: "4459106c-3297-4111-a2e8-5671b7525909"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.927425 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "4459106c-3297-4111-a2e8-5671b7525909" (UID: "4459106c-3297-4111-a2e8-5671b7525909"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.929662 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "4459106c-3297-4111-a2e8-5671b7525909" (UID: "4459106c-3297-4111-a2e8-5671b7525909"). InnerVolumeSpecName "ceilometer-compute-config-data-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.979054 4877 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.979115 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.979136 4877 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.979152 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-txcg5\" (UniqueName: \"kubernetes.io/projected/4459106c-3297-4111-a2e8-5671b7525909-kube-api-access-txcg5\") on node \"crc\" DevicePath \"\"" Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.979166 4877 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.979181 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:28:25 crc kubenswrapper[4877]: I0128 17:28:25.979195 4877 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/4459106c-3297-4111-a2e8-5671b7525909-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.244932 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk" event={"ID":"4459106c-3297-4111-a2e8-5671b7525909","Type":"ContainerDied","Data":"bcda2b510c1e5a0738f0748715469c75e3dca325ee48b6cfc371fc6b2b0819bc"} Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.244996 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bcda2b510c1e5a0738f0748715469c75e3dca325ee48b6cfc371fc6b2b0819bc" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.245538 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8whqk" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.362759 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2"] Jan 28 17:28:26 crc kubenswrapper[4877]: E0128 17:28:26.363239 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bab1e5b-ced0-41c1-8abc-2319ad2d6051" containerName="registry-server" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.363261 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bab1e5b-ced0-41c1-8abc-2319ad2d6051" containerName="registry-server" Jan 28 17:28:26 crc kubenswrapper[4877]: E0128 17:28:26.363303 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bab1e5b-ced0-41c1-8abc-2319ad2d6051" containerName="extract-utilities" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.363312 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bab1e5b-ced0-41c1-8abc-2319ad2d6051" containerName="extract-utilities" Jan 28 17:28:26 crc kubenswrapper[4877]: E0128 17:28:26.363327 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bab1e5b-ced0-41c1-8abc-2319ad2d6051" containerName="extract-content" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.363337 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bab1e5b-ced0-41c1-8abc-2319ad2d6051" containerName="extract-content" Jan 28 17:28:26 crc kubenswrapper[4877]: E0128 17:28:26.363356 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4459106c-3297-4111-a2e8-5671b7525909" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.363365 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="4459106c-3297-4111-a2e8-5671b7525909" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.363598 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="4459106c-3297-4111-a2e8-5671b7525909" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.363633 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bab1e5b-ced0-41c1-8abc-2319ad2d6051" containerName="registry-server" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.364433 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.367293 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.367582 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.368718 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.368799 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.372275 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-ipmi-config-data" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.382529 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2"] Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.492741 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ssh-key-openstack-edpm-ipam\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.492840 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxwrb\" (UniqueName: \"kubernetes.io/projected/13750eaa-4bdc-4e97-aabc-6f9bbb702666-kube-api-access-bxwrb\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.492902 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.493002 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.493048 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: 
\"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.493098 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.494025 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.598344 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxwrb\" (UniqueName: \"kubernetes.io/projected/13750eaa-4bdc-4e97-aabc-6f9bbb702666-kube-api-access-bxwrb\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.598528 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.598577 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.598616 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.598648 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " 
pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.598799 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.598908 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ssh-key-openstack-edpm-ipam\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.604190 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.604269 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.604276 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ssh-key-openstack-edpm-ipam\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.604497 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.605145 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.606119 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.620641 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxwrb\" (UniqueName: \"kubernetes.io/projected/13750eaa-4bdc-4e97-aabc-6f9bbb702666-kube-api-access-bxwrb\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:26 crc kubenswrapper[4877]: I0128 17:28:26.687277 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:28:27 crc kubenswrapper[4877]: I0128 17:28:27.313623 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2"] Jan 28 17:28:28 crc kubenswrapper[4877]: I0128 17:28:28.274820 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" event={"ID":"13750eaa-4bdc-4e97-aabc-6f9bbb702666","Type":"ContainerStarted","Data":"47985cbf21aee35aed15eb4c48c240da890200c92b7d82d5d4ef95a499afca29"} Jan 28 17:28:29 crc kubenswrapper[4877]: I0128 17:28:29.290099 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" event={"ID":"13750eaa-4bdc-4e97-aabc-6f9bbb702666","Type":"ContainerStarted","Data":"5a2890711f48e87cf069122816e9904adee8c6624b02416b7b0f0ea12ec4e1cc"} Jan 28 17:28:29 crc kubenswrapper[4877]: I0128 17:28:29.327648 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" podStartSLOduration=2.70539189 podStartE2EDuration="3.327620573s" podCreationTimestamp="2026-01-28 17:28:26 +0000 UTC" firstStartedPulling="2026-01-28 17:28:27.317500616 +0000 UTC m=+3210.875827504" lastFinishedPulling="2026-01-28 17:28:27.939729299 +0000 UTC m=+3211.498056187" observedRunningTime="2026-01-28 17:28:29.32157382 +0000 UTC m=+3212.879900728" watchObservedRunningTime="2026-01-28 17:28:29.327620573 +0000 UTC m=+3212.885947461" Jan 28 17:28:37 crc kubenswrapper[4877]: I0128 17:28:37.341597 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:28:37 crc kubenswrapper[4877]: E0128 17:28:37.342578 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:28:51 crc kubenswrapper[4877]: I0128 17:28:51.330651 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:28:51 crc kubenswrapper[4877]: E0128 17:28:51.331749 4877 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:29:03 crc kubenswrapper[4877]: I0128 17:29:03.330945 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:29:03 crc kubenswrapper[4877]: E0128 17:29:03.331944 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:29:15 crc kubenswrapper[4877]: I0128 17:29:15.333120 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:29:15 crc kubenswrapper[4877]: I0128 17:29:15.929324 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"3dd86440232db0a12c7d2553820bdf67cd2b92550ff34d6442d63e0d796327d2"} Jan 28 17:30:00 crc kubenswrapper[4877]: I0128 17:30:00.155957 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd"] Jan 28 17:30:00 crc kubenswrapper[4877]: I0128 17:30:00.162689 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" Jan 28 17:30:00 crc kubenswrapper[4877]: I0128 17:30:00.166026 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 17:30:00 crc kubenswrapper[4877]: I0128 17:30:00.166133 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 17:30:00 crc kubenswrapper[4877]: I0128 17:30:00.171969 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd"] Jan 28 17:30:00 crc kubenswrapper[4877]: I0128 17:30:00.215410 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-secret-volume\") pod \"collect-profiles-29493690-9gxxd\" (UID: \"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" Jan 28 17:30:00 crc kubenswrapper[4877]: I0128 17:30:00.215802 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-config-volume\") pod \"collect-profiles-29493690-9gxxd\" (UID: \"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" Jan 28 17:30:00 crc kubenswrapper[4877]: I0128 17:30:00.215902 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsn8g\" (UniqueName: \"kubernetes.io/projected/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-kube-api-access-bsn8g\") pod \"collect-profiles-29493690-9gxxd\" (UID: \"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" Jan 28 17:30:00 crc kubenswrapper[4877]: I0128 17:30:00.320636 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-secret-volume\") pod \"collect-profiles-29493690-9gxxd\" (UID: \"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" Jan 28 17:30:00 crc kubenswrapper[4877]: I0128 17:30:00.320934 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-config-volume\") pod \"collect-profiles-29493690-9gxxd\" (UID: \"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" Jan 28 17:30:00 crc kubenswrapper[4877]: I0128 17:30:00.320995 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsn8g\" (UniqueName: \"kubernetes.io/projected/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-kube-api-access-bsn8g\") pod \"collect-profiles-29493690-9gxxd\" (UID: \"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" Jan 28 17:30:00 crc kubenswrapper[4877]: I0128 17:30:00.322820 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-config-volume\") pod 
\"collect-profiles-29493690-9gxxd\" (UID: \"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" Jan 28 17:30:00 crc kubenswrapper[4877]: I0128 17:30:00.334931 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-secret-volume\") pod \"collect-profiles-29493690-9gxxd\" (UID: \"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" Jan 28 17:30:00 crc kubenswrapper[4877]: I0128 17:30:00.345087 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsn8g\" (UniqueName: \"kubernetes.io/projected/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-kube-api-access-bsn8g\") pod \"collect-profiles-29493690-9gxxd\" (UID: \"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" Jan 28 17:30:00 crc kubenswrapper[4877]: I0128 17:30:00.491344 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" Jan 28 17:30:00 crc kubenswrapper[4877]: I0128 17:30:00.975689 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd"] Jan 28 17:30:00 crc kubenswrapper[4877]: W0128 17:30:00.984070 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1ec7a03e_8732_4dfe_90c2_4b9cd48d4c7d.slice/crio-5da255e4cd47e646bc632046c6a47922176458ff9734ee4aee38ddb27384797f WatchSource:0}: Error finding container 5da255e4cd47e646bc632046c6a47922176458ff9734ee4aee38ddb27384797f: Status 404 returned error can't find the container with id 5da255e4cd47e646bc632046c6a47922176458ff9734ee4aee38ddb27384797f Jan 28 17:30:01 crc kubenswrapper[4877]: I0128 17:30:01.781017 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" event={"ID":"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d","Type":"ContainerStarted","Data":"ac8512b8f0cfa68903416d050f5cf69149e70c3c33e3965ce8c407439f67b0c6"} Jan 28 17:30:01 crc kubenswrapper[4877]: I0128 17:30:01.781337 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" event={"ID":"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d","Type":"ContainerStarted","Data":"5da255e4cd47e646bc632046c6a47922176458ff9734ee4aee38ddb27384797f"} Jan 28 17:30:01 crc kubenswrapper[4877]: I0128 17:30:01.805930 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" podStartSLOduration=1.805902917 podStartE2EDuration="1.805902917s" podCreationTimestamp="2026-01-28 17:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:30:01.798230719 +0000 UTC m=+3305.356557637" watchObservedRunningTime="2026-01-28 17:30:01.805902917 +0000 UTC m=+3305.364229805" Jan 28 17:30:02 crc kubenswrapper[4877]: I0128 17:30:02.801368 4877 generic.go:334] "Generic (PLEG): container finished" podID="1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d" containerID="ac8512b8f0cfa68903416d050f5cf69149e70c3c33e3965ce8c407439f67b0c6" exitCode=0 Jan 28 17:30:02 crc kubenswrapper[4877]: I0128 17:30:02.802093 
4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" event={"ID":"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d","Type":"ContainerDied","Data":"ac8512b8f0cfa68903416d050f5cf69149e70c3c33e3965ce8c407439f67b0c6"} Jan 28 17:30:04 crc kubenswrapper[4877]: I0128 17:30:04.276227 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" Jan 28 17:30:04 crc kubenswrapper[4877]: I0128 17:30:04.366101 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bsn8g\" (UniqueName: \"kubernetes.io/projected/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-kube-api-access-bsn8g\") pod \"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d\" (UID: \"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d\") " Jan 28 17:30:04 crc kubenswrapper[4877]: I0128 17:30:04.366395 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-config-volume\") pod \"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d\" (UID: \"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d\") " Jan 28 17:30:04 crc kubenswrapper[4877]: I0128 17:30:04.366652 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-secret-volume\") pod \"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d\" (UID: \"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d\") " Jan 28 17:30:04 crc kubenswrapper[4877]: I0128 17:30:04.367369 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-config-volume" (OuterVolumeSpecName: "config-volume") pod "1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d" (UID: "1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:30:04 crc kubenswrapper[4877]: I0128 17:30:04.374147 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-kube-api-access-bsn8g" (OuterVolumeSpecName: "kube-api-access-bsn8g") pod "1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d" (UID: "1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d"). InnerVolumeSpecName "kube-api-access-bsn8g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:30:04 crc kubenswrapper[4877]: I0128 17:30:04.374712 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d" (UID: "1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:30:04 crc kubenswrapper[4877]: I0128 17:30:04.470142 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bsn8g\" (UniqueName: \"kubernetes.io/projected/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-kube-api-access-bsn8g\") on node \"crc\" DevicePath \"\"" Jan 28 17:30:04 crc kubenswrapper[4877]: I0128 17:30:04.470194 4877 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 17:30:04 crc kubenswrapper[4877]: I0128 17:30:04.470205 4877 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 17:30:04 crc kubenswrapper[4877]: I0128 17:30:04.829370 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" event={"ID":"1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d","Type":"ContainerDied","Data":"5da255e4cd47e646bc632046c6a47922176458ff9734ee4aee38ddb27384797f"} Jan 28 17:30:04 crc kubenswrapper[4877]: I0128 17:30:04.829415 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5da255e4cd47e646bc632046c6a47922176458ff9734ee4aee38ddb27384797f" Jan 28 17:30:04 crc kubenswrapper[4877]: I0128 17:30:04.829525 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd" Jan 28 17:30:04 crc kubenswrapper[4877]: I0128 17:30:04.905862 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd"] Jan 28 17:30:04 crc kubenswrapper[4877]: I0128 17:30:04.922059 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493645-pn7wd"] Jan 28 17:30:05 crc kubenswrapper[4877]: I0128 17:30:05.354697 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a605e496-3f0b-4d5d-869e-f39742045553" path="/var/lib/kubelet/pods/a605e496-3f0b-4d5d-869e-f39742045553/volumes" Jan 28 17:30:22 crc kubenswrapper[4877]: I0128 17:30:22.013539 4877 generic.go:334] "Generic (PLEG): container finished" podID="13750eaa-4bdc-4e97-aabc-6f9bbb702666" containerID="5a2890711f48e87cf069122816e9904adee8c6624b02416b7b0f0ea12ec4e1cc" exitCode=0 Jan 28 17:30:22 crc kubenswrapper[4877]: I0128 17:30:22.013604 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" event={"ID":"13750eaa-4bdc-4e97-aabc-6f9bbb702666","Type":"ContainerDied","Data":"5a2890711f48e87cf069122816e9904adee8c6624b02416b7b0f0ea12ec4e1cc"} Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.652855 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.819838 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-0\") pod \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.820023 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-1\") pod \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.820140 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxwrb\" (UniqueName: \"kubernetes.io/projected/13750eaa-4bdc-4e97-aabc-6f9bbb702666-kube-api-access-bxwrb\") pod \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.820203 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-telemetry-power-monitoring-combined-ca-bundle\") pod \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.820290 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-2\") pod \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.820432 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ssh-key-openstack-edpm-ipam\") pod \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.820725 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-inventory\") pod \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\" (UID: \"13750eaa-4bdc-4e97-aabc-6f9bbb702666\") " Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.829178 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13750eaa-4bdc-4e97-aabc-6f9bbb702666-kube-api-access-bxwrb" (OuterVolumeSpecName: "kube-api-access-bxwrb") pod "13750eaa-4bdc-4e97-aabc-6f9bbb702666" (UID: "13750eaa-4bdc-4e97-aabc-6f9bbb702666"). InnerVolumeSpecName "kube-api-access-bxwrb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.839470 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "13750eaa-4bdc-4e97-aabc-6f9bbb702666" (UID: "13750eaa-4bdc-4e97-aabc-6f9bbb702666"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.860236 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-0" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-0") pod "13750eaa-4bdc-4e97-aabc-6f9bbb702666" (UID: "13750eaa-4bdc-4e97-aabc-6f9bbb702666"). InnerVolumeSpecName "ceilometer-ipmi-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.872136 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-inventory" (OuterVolumeSpecName: "inventory") pod "13750eaa-4bdc-4e97-aabc-6f9bbb702666" (UID: "13750eaa-4bdc-4e97-aabc-6f9bbb702666"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.872203 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "13750eaa-4bdc-4e97-aabc-6f9bbb702666" (UID: "13750eaa-4bdc-4e97-aabc-6f9bbb702666"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.878953 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-2" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-2") pod "13750eaa-4bdc-4e97-aabc-6f9bbb702666" (UID: "13750eaa-4bdc-4e97-aabc-6f9bbb702666"). InnerVolumeSpecName "ceilometer-ipmi-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.885180 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-1" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-1") pod "13750eaa-4bdc-4e97-aabc-6f9bbb702666" (UID: "13750eaa-4bdc-4e97-aabc-6f9bbb702666"). InnerVolumeSpecName "ceilometer-ipmi-config-data-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.924450 4877 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.924509 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxwrb\" (UniqueName: \"kubernetes.io/projected/13750eaa-4bdc-4e97-aabc-6f9bbb702666-kube-api-access-bxwrb\") on node \"crc\" DevicePath \"\"" Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.924520 4877 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.924534 4877 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-2\") on node \"crc\" DevicePath \"\"" Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.924543 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.924553 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:30:23 crc kubenswrapper[4877]: I0128 17:30:23.924561 4877 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/13750eaa-4bdc-4e97-aabc-6f9bbb702666-ceilometer-ipmi-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.040141 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" event={"ID":"13750eaa-4bdc-4e97-aabc-6f9bbb702666","Type":"ContainerDied","Data":"47985cbf21aee35aed15eb4c48c240da890200c92b7d82d5d4ef95a499afca29"} Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.040191 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47985cbf21aee35aed15eb4c48c240da890200c92b7d82d5d4ef95a499afca29" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.040215 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-gp5t2" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.213501 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9"] Jan 28 17:30:24 crc kubenswrapper[4877]: E0128 17:30:24.214120 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d" containerName="collect-profiles" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.214150 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d" containerName="collect-profiles" Jan 28 17:30:24 crc kubenswrapper[4877]: E0128 17:30:24.214211 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13750eaa-4bdc-4e97-aabc-6f9bbb702666" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.214223 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="13750eaa-4bdc-4e97-aabc-6f9bbb702666" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.214502 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d" containerName="collect-profiles" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.214546 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="13750eaa-4bdc-4e97-aabc-6f9bbb702666" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.215550 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.219031 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.219418 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"logging-compute-config-data" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.220168 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9qhb" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.221173 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.223245 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.231793 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9"] Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.334115 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-d8qx9\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.334200 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" 
(UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-ssh-key-openstack-edpm-ipam\") pod \"logging-edpm-deployment-openstack-edpm-ipam-d8qx9\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.334283 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hr8wb\" (UniqueName: \"kubernetes.io/projected/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-kube-api-access-hr8wb\") pod \"logging-edpm-deployment-openstack-edpm-ipam-d8qx9\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.334317 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-d8qx9\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.334360 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-d8qx9\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.437667 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hr8wb\" (UniqueName: \"kubernetes.io/projected/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-kube-api-access-hr8wb\") pod \"logging-edpm-deployment-openstack-edpm-ipam-d8qx9\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.437750 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-d8qx9\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.437851 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-d8qx9\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.438127 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-d8qx9\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.438264 4877 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-ssh-key-openstack-edpm-ipam\") pod \"logging-edpm-deployment-openstack-edpm-ipam-d8qx9\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.443462 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-d8qx9\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.443714 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-d8qx9\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.448837 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-ssh-key-openstack-edpm-ipam\") pod \"logging-edpm-deployment-openstack-edpm-ipam-d8qx9\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.449269 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-d8qx9\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.459859 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hr8wb\" (UniqueName: \"kubernetes.io/projected/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-kube-api-access-hr8wb\") pod \"logging-edpm-deployment-openstack-edpm-ipam-d8qx9\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:24 crc kubenswrapper[4877]: I0128 17:30:24.537522 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:25 crc kubenswrapper[4877]: I0128 17:30:25.154070 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9"] Jan 28 17:30:26 crc kubenswrapper[4877]: I0128 17:30:26.067583 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" event={"ID":"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2","Type":"ContainerStarted","Data":"8733b96c26a623e125c1e0f973a9c5980fd94f274f5d201c2efc74de470a446b"} Jan 28 17:30:26 crc kubenswrapper[4877]: I0128 17:30:26.068058 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" event={"ID":"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2","Type":"ContainerStarted","Data":"49917b8849f8e93980eef20da19c621a37e914ca3fb45fa30a450693e553c127"} Jan 28 17:30:27 crc kubenswrapper[4877]: I0128 17:30:27.100974 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" podStartSLOduration=2.605890409 podStartE2EDuration="3.100953963s" podCreationTimestamp="2026-01-28 17:30:24 +0000 UTC" firstStartedPulling="2026-01-28 17:30:25.171467097 +0000 UTC m=+3328.729793995" lastFinishedPulling="2026-01-28 17:30:25.666530671 +0000 UTC m=+3329.224857549" observedRunningTime="2026-01-28 17:30:27.097790907 +0000 UTC m=+3330.656117795" watchObservedRunningTime="2026-01-28 17:30:27.100953963 +0000 UTC m=+3330.659280871" Jan 28 17:30:30 crc kubenswrapper[4877]: I0128 17:30:30.762323 4877 scope.go:117] "RemoveContainer" containerID="cb6d6e18484c430d6b4740a6e4ad872ab74d6e3be62cec894554db87c05e231d" Jan 28 17:30:40 crc kubenswrapper[4877]: I0128 17:30:40.218005 4877 generic.go:334] "Generic (PLEG): container finished" podID="5bfb89c1-fedb-4d4f-be4d-3bf42757bce2" containerID="8733b96c26a623e125c1e0f973a9c5980fd94f274f5d201c2efc74de470a446b" exitCode=0 Jan 28 17:30:40 crc kubenswrapper[4877]: I0128 17:30:40.218099 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" event={"ID":"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2","Type":"ContainerDied","Data":"8733b96c26a623e125c1e0f973a9c5980fd94f274f5d201c2efc74de470a446b"} Jan 28 17:30:41 crc kubenswrapper[4877]: I0128 17:30:41.860087 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:30:41 crc kubenswrapper[4877]: I0128 17:30:41.907396 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-logging-compute-config-data-0\") pod \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " Jan 28 17:30:41 crc kubenswrapper[4877]: I0128 17:30:41.907842 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-inventory\") pod \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " Jan 28 17:30:41 crc kubenswrapper[4877]: I0128 17:30:41.908132 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-ssh-key-openstack-edpm-ipam\") pod \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " Jan 28 17:30:41 crc kubenswrapper[4877]: I0128 17:30:41.908466 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hr8wb\" (UniqueName: \"kubernetes.io/projected/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-kube-api-access-hr8wb\") pod \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " Jan 28 17:30:41 crc kubenswrapper[4877]: I0128 17:30:41.908591 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-logging-compute-config-data-1\") pod \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\" (UID: \"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2\") " Jan 28 17:30:41 crc kubenswrapper[4877]: I0128 17:30:41.945663 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-kube-api-access-hr8wb" (OuterVolumeSpecName: "kube-api-access-hr8wb") pod "5bfb89c1-fedb-4d4f-be4d-3bf42757bce2" (UID: "5bfb89c1-fedb-4d4f-be4d-3bf42757bce2"). InnerVolumeSpecName "kube-api-access-hr8wb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:30:41 crc kubenswrapper[4877]: I0128 17:30:41.953410 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-logging-compute-config-data-0" (OuterVolumeSpecName: "logging-compute-config-data-0") pod "5bfb89c1-fedb-4d4f-be4d-3bf42757bce2" (UID: "5bfb89c1-fedb-4d4f-be4d-3bf42757bce2"). InnerVolumeSpecName "logging-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:30:41 crc kubenswrapper[4877]: I0128 17:30:41.954330 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "5bfb89c1-fedb-4d4f-be4d-3bf42757bce2" (UID: "5bfb89c1-fedb-4d4f-be4d-3bf42757bce2"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:30:41 crc kubenswrapper[4877]: I0128 17:30:41.955119 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-logging-compute-config-data-1" (OuterVolumeSpecName: "logging-compute-config-data-1") pod "5bfb89c1-fedb-4d4f-be4d-3bf42757bce2" (UID: "5bfb89c1-fedb-4d4f-be4d-3bf42757bce2"). InnerVolumeSpecName "logging-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:30:41 crc kubenswrapper[4877]: I0128 17:30:41.959185 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-inventory" (OuterVolumeSpecName: "inventory") pod "5bfb89c1-fedb-4d4f-be4d-3bf42757bce2" (UID: "5bfb89c1-fedb-4d4f-be4d-3bf42757bce2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:30:42 crc kubenswrapper[4877]: I0128 17:30:42.023113 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 17:30:42 crc kubenswrapper[4877]: I0128 17:30:42.023166 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hr8wb\" (UniqueName: \"kubernetes.io/projected/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-kube-api-access-hr8wb\") on node \"crc\" DevicePath \"\"" Jan 28 17:30:42 crc kubenswrapper[4877]: I0128 17:30:42.023312 4877 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-logging-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 28 17:30:42 crc kubenswrapper[4877]: I0128 17:30:42.023332 4877 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-logging-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 28 17:30:42 crc kubenswrapper[4877]: I0128 17:30:42.023354 4877 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5bfb89c1-fedb-4d4f-be4d-3bf42757bce2-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 17:30:42 crc kubenswrapper[4877]: I0128 17:30:42.239871 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" event={"ID":"5bfb89c1-fedb-4d4f-be4d-3bf42757bce2","Type":"ContainerDied","Data":"49917b8849f8e93980eef20da19c621a37e914ca3fb45fa30a450693e553c127"} Jan 28 17:30:42 crc kubenswrapper[4877]: I0128 17:30:42.239910 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49917b8849f8e93980eef20da19c621a37e914ca3fb45fa30a450693e553c127" Jan 28 17:30:42 crc kubenswrapper[4877]: I0128 17:30:42.240023 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-d8qx9" Jan 28 17:31:37 crc kubenswrapper[4877]: I0128 17:31:37.077208 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:31:37 crc kubenswrapper[4877]: I0128 17:31:37.078024 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:31:47 crc kubenswrapper[4877]: I0128 17:31:47.533063 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jl5wc"] Jan 28 17:31:47 crc kubenswrapper[4877]: E0128 17:31:47.534729 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bfb89c1-fedb-4d4f-be4d-3bf42757bce2" containerName="logging-edpm-deployment-openstack-edpm-ipam" Jan 28 17:31:47 crc kubenswrapper[4877]: I0128 17:31:47.534746 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bfb89c1-fedb-4d4f-be4d-3bf42757bce2" containerName="logging-edpm-deployment-openstack-edpm-ipam" Jan 28 17:31:47 crc kubenswrapper[4877]: I0128 17:31:47.536389 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bfb89c1-fedb-4d4f-be4d-3bf42757bce2" containerName="logging-edpm-deployment-openstack-edpm-ipam" Jan 28 17:31:47 crc kubenswrapper[4877]: I0128 17:31:47.539893 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:31:47 crc kubenswrapper[4877]: I0128 17:31:47.553374 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jl5wc"] Jan 28 17:31:47 crc kubenswrapper[4877]: I0128 17:31:47.654565 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkh76\" (UniqueName: \"kubernetes.io/projected/08fbe9ca-309f-4362-9103-f62b983372ae-kube-api-access-vkh76\") pod \"redhat-marketplace-jl5wc\" (UID: \"08fbe9ca-309f-4362-9103-f62b983372ae\") " pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:31:47 crc kubenswrapper[4877]: I0128 17:31:47.654655 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08fbe9ca-309f-4362-9103-f62b983372ae-catalog-content\") pod \"redhat-marketplace-jl5wc\" (UID: \"08fbe9ca-309f-4362-9103-f62b983372ae\") " pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:31:47 crc kubenswrapper[4877]: I0128 17:31:47.654837 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08fbe9ca-309f-4362-9103-f62b983372ae-utilities\") pod \"redhat-marketplace-jl5wc\" (UID: \"08fbe9ca-309f-4362-9103-f62b983372ae\") " pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:31:47 crc kubenswrapper[4877]: I0128 17:31:47.758752 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08fbe9ca-309f-4362-9103-f62b983372ae-utilities\") pod \"redhat-marketplace-jl5wc\" (UID: \"08fbe9ca-309f-4362-9103-f62b983372ae\") " pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:31:47 crc kubenswrapper[4877]: I0128 17:31:47.759006 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkh76\" (UniqueName: \"kubernetes.io/projected/08fbe9ca-309f-4362-9103-f62b983372ae-kube-api-access-vkh76\") pod \"redhat-marketplace-jl5wc\" (UID: \"08fbe9ca-309f-4362-9103-f62b983372ae\") " pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:31:47 crc kubenswrapper[4877]: I0128 17:31:47.759058 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08fbe9ca-309f-4362-9103-f62b983372ae-catalog-content\") pod \"redhat-marketplace-jl5wc\" (UID: \"08fbe9ca-309f-4362-9103-f62b983372ae\") " pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:31:47 crc kubenswrapper[4877]: I0128 17:31:47.759440 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08fbe9ca-309f-4362-9103-f62b983372ae-utilities\") pod \"redhat-marketplace-jl5wc\" (UID: \"08fbe9ca-309f-4362-9103-f62b983372ae\") " pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:31:47 crc kubenswrapper[4877]: I0128 17:31:47.759518 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08fbe9ca-309f-4362-9103-f62b983372ae-catalog-content\") pod \"redhat-marketplace-jl5wc\" (UID: \"08fbe9ca-309f-4362-9103-f62b983372ae\") " pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:31:47 crc kubenswrapper[4877]: I0128 17:31:47.791304 4877 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-vkh76\" (UniqueName: \"kubernetes.io/projected/08fbe9ca-309f-4362-9103-f62b983372ae-kube-api-access-vkh76\") pod \"redhat-marketplace-jl5wc\" (UID: \"08fbe9ca-309f-4362-9103-f62b983372ae\") " pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:31:47 crc kubenswrapper[4877]: I0128 17:31:47.889464 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:31:48 crc kubenswrapper[4877]: I0128 17:31:48.480964 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jl5wc"] Jan 28 17:31:49 crc kubenswrapper[4877]: I0128 17:31:49.104052 4877 generic.go:334] "Generic (PLEG): container finished" podID="08fbe9ca-309f-4362-9103-f62b983372ae" containerID="1807775e33273d8c0f8bc61b54d27c177ad88225986b088f75ce44db10b8df25" exitCode=0 Jan 28 17:31:49 crc kubenswrapper[4877]: I0128 17:31:49.104662 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jl5wc" event={"ID":"08fbe9ca-309f-4362-9103-f62b983372ae","Type":"ContainerDied","Data":"1807775e33273d8c0f8bc61b54d27c177ad88225986b088f75ce44db10b8df25"} Jan 28 17:31:49 crc kubenswrapper[4877]: I0128 17:31:49.104713 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jl5wc" event={"ID":"08fbe9ca-309f-4362-9103-f62b983372ae","Type":"ContainerStarted","Data":"21857b7949c95beb45afa4b7969b774d6f6a13a968e7f974a0b3955e09b51f70"} Jan 28 17:31:49 crc kubenswrapper[4877]: I0128 17:31:49.112682 4877 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 17:31:51 crc kubenswrapper[4877]: I0128 17:31:51.136134 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jl5wc" event={"ID":"08fbe9ca-309f-4362-9103-f62b983372ae","Type":"ContainerStarted","Data":"085bc26bf3a2c1ad87a169952e9587f53db62696511ffc71284f04696d4821bf"} Jan 28 17:31:53 crc kubenswrapper[4877]: I0128 17:31:53.173871 4877 generic.go:334] "Generic (PLEG): container finished" podID="08fbe9ca-309f-4362-9103-f62b983372ae" containerID="085bc26bf3a2c1ad87a169952e9587f53db62696511ffc71284f04696d4821bf" exitCode=0 Jan 28 17:31:53 crc kubenswrapper[4877]: I0128 17:31:53.174035 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jl5wc" event={"ID":"08fbe9ca-309f-4362-9103-f62b983372ae","Type":"ContainerDied","Data":"085bc26bf3a2c1ad87a169952e9587f53db62696511ffc71284f04696d4821bf"} Jan 28 17:31:55 crc kubenswrapper[4877]: I0128 17:31:55.219254 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jl5wc" event={"ID":"08fbe9ca-309f-4362-9103-f62b983372ae","Type":"ContainerStarted","Data":"600bb9bb9a6fd4b7a4808f4cfc971230f70d0313d53ddf31295fa6b0260439f5"} Jan 28 17:31:55 crc kubenswrapper[4877]: I0128 17:31:55.248428 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jl5wc" podStartSLOduration=3.420191264 podStartE2EDuration="8.24840256s" podCreationTimestamp="2026-01-28 17:31:47 +0000 UTC" firstStartedPulling="2026-01-28 17:31:49.112400727 +0000 UTC m=+3412.670727615" lastFinishedPulling="2026-01-28 17:31:53.940612023 +0000 UTC m=+3417.498938911" observedRunningTime="2026-01-28 17:31:55.244271019 +0000 UTC m=+3418.802597927" watchObservedRunningTime="2026-01-28 17:31:55.24840256 +0000 UTC 
m=+3418.806729448" Jan 28 17:31:57 crc kubenswrapper[4877]: I0128 17:31:57.890751 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:31:57 crc kubenswrapper[4877]: I0128 17:31:57.891463 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:31:57 crc kubenswrapper[4877]: I0128 17:31:57.962713 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:31:59 crc kubenswrapper[4877]: I0128 17:31:59.910822 4877 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wnxxp container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.36:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 17:31:59 crc kubenswrapper[4877]: I0128 17:31:59.911374 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp" podUID="fbd8dc94-00b1-4aff-a395-72702a0db6c1" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.36:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 17:32:07 crc kubenswrapper[4877]: I0128 17:32:07.076354 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:32:07 crc kubenswrapper[4877]: I0128 17:32:07.077147 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:32:07 crc kubenswrapper[4877]: I0128 17:32:07.948628 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:32:08 crc kubenswrapper[4877]: I0128 17:32:08.017615 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jl5wc"] Jan 28 17:32:08 crc kubenswrapper[4877]: I0128 17:32:08.371513 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jl5wc" podUID="08fbe9ca-309f-4362-9103-f62b983372ae" containerName="registry-server" containerID="cri-o://600bb9bb9a6fd4b7a4808f4cfc971230f70d0313d53ddf31295fa6b0260439f5" gracePeriod=2 Jan 28 17:32:08 crc kubenswrapper[4877]: I0128 17:32:08.925921 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:32:08 crc kubenswrapper[4877]: I0128 17:32:08.995957 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08fbe9ca-309f-4362-9103-f62b983372ae-catalog-content\") pod \"08fbe9ca-309f-4362-9103-f62b983372ae\" (UID: \"08fbe9ca-309f-4362-9103-f62b983372ae\") " Jan 28 17:32:08 crc kubenswrapper[4877]: I0128 17:32:08.996172 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08fbe9ca-309f-4362-9103-f62b983372ae-utilities\") pod \"08fbe9ca-309f-4362-9103-f62b983372ae\" (UID: \"08fbe9ca-309f-4362-9103-f62b983372ae\") " Jan 28 17:32:08 crc kubenswrapper[4877]: I0128 17:32:08.996541 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vkh76\" (UniqueName: \"kubernetes.io/projected/08fbe9ca-309f-4362-9103-f62b983372ae-kube-api-access-vkh76\") pod \"08fbe9ca-309f-4362-9103-f62b983372ae\" (UID: \"08fbe9ca-309f-4362-9103-f62b983372ae\") " Jan 28 17:32:08 crc kubenswrapper[4877]: I0128 17:32:08.997045 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08fbe9ca-309f-4362-9103-f62b983372ae-utilities" (OuterVolumeSpecName: "utilities") pod "08fbe9ca-309f-4362-9103-f62b983372ae" (UID: "08fbe9ca-309f-4362-9103-f62b983372ae"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:32:08 crc kubenswrapper[4877]: I0128 17:32:08.997665 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08fbe9ca-309f-4362-9103-f62b983372ae-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.003415 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08fbe9ca-309f-4362-9103-f62b983372ae-kube-api-access-vkh76" (OuterVolumeSpecName: "kube-api-access-vkh76") pod "08fbe9ca-309f-4362-9103-f62b983372ae" (UID: "08fbe9ca-309f-4362-9103-f62b983372ae"). InnerVolumeSpecName "kube-api-access-vkh76". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.029666 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08fbe9ca-309f-4362-9103-f62b983372ae-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "08fbe9ca-309f-4362-9103-f62b983372ae" (UID: "08fbe9ca-309f-4362-9103-f62b983372ae"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.100053 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vkh76\" (UniqueName: \"kubernetes.io/projected/08fbe9ca-309f-4362-9103-f62b983372ae-kube-api-access-vkh76\") on node \"crc\" DevicePath \"\"" Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.100093 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08fbe9ca-309f-4362-9103-f62b983372ae-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.389941 4877 generic.go:334] "Generic (PLEG): container finished" podID="08fbe9ca-309f-4362-9103-f62b983372ae" containerID="600bb9bb9a6fd4b7a4808f4cfc971230f70d0313d53ddf31295fa6b0260439f5" exitCode=0 Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.389988 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jl5wc" event={"ID":"08fbe9ca-309f-4362-9103-f62b983372ae","Type":"ContainerDied","Data":"600bb9bb9a6fd4b7a4808f4cfc971230f70d0313d53ddf31295fa6b0260439f5"} Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.390019 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jl5wc" event={"ID":"08fbe9ca-309f-4362-9103-f62b983372ae","Type":"ContainerDied","Data":"21857b7949c95beb45afa4b7969b774d6f6a13a968e7f974a0b3955e09b51f70"} Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.390038 4877 scope.go:117] "RemoveContainer" containerID="600bb9bb9a6fd4b7a4808f4cfc971230f70d0313d53ddf31295fa6b0260439f5" Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.390051 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jl5wc" Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.429809 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jl5wc"] Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.437404 4877 scope.go:117] "RemoveContainer" containerID="085bc26bf3a2c1ad87a169952e9587f53db62696511ffc71284f04696d4821bf" Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.443830 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jl5wc"] Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.471297 4877 scope.go:117] "RemoveContainer" containerID="1807775e33273d8c0f8bc61b54d27c177ad88225986b088f75ce44db10b8df25" Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.553358 4877 scope.go:117] "RemoveContainer" containerID="600bb9bb9a6fd4b7a4808f4cfc971230f70d0313d53ddf31295fa6b0260439f5" Jan 28 17:32:09 crc kubenswrapper[4877]: E0128 17:32:09.554492 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"600bb9bb9a6fd4b7a4808f4cfc971230f70d0313d53ddf31295fa6b0260439f5\": container with ID starting with 600bb9bb9a6fd4b7a4808f4cfc971230f70d0313d53ddf31295fa6b0260439f5 not found: ID does not exist" containerID="600bb9bb9a6fd4b7a4808f4cfc971230f70d0313d53ddf31295fa6b0260439f5" Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.554533 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"600bb9bb9a6fd4b7a4808f4cfc971230f70d0313d53ddf31295fa6b0260439f5"} err="failed to get container status \"600bb9bb9a6fd4b7a4808f4cfc971230f70d0313d53ddf31295fa6b0260439f5\": rpc error: code = NotFound desc = could not find container \"600bb9bb9a6fd4b7a4808f4cfc971230f70d0313d53ddf31295fa6b0260439f5\": container with ID starting with 600bb9bb9a6fd4b7a4808f4cfc971230f70d0313d53ddf31295fa6b0260439f5 not found: ID does not exist" Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.554564 4877 scope.go:117] "RemoveContainer" containerID="085bc26bf3a2c1ad87a169952e9587f53db62696511ffc71284f04696d4821bf" Jan 28 17:32:09 crc kubenswrapper[4877]: E0128 17:32:09.555039 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"085bc26bf3a2c1ad87a169952e9587f53db62696511ffc71284f04696d4821bf\": container with ID starting with 085bc26bf3a2c1ad87a169952e9587f53db62696511ffc71284f04696d4821bf not found: ID does not exist" containerID="085bc26bf3a2c1ad87a169952e9587f53db62696511ffc71284f04696d4821bf" Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.555100 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"085bc26bf3a2c1ad87a169952e9587f53db62696511ffc71284f04696d4821bf"} err="failed to get container status \"085bc26bf3a2c1ad87a169952e9587f53db62696511ffc71284f04696d4821bf\": rpc error: code = NotFound desc = could not find container \"085bc26bf3a2c1ad87a169952e9587f53db62696511ffc71284f04696d4821bf\": container with ID starting with 085bc26bf3a2c1ad87a169952e9587f53db62696511ffc71284f04696d4821bf not found: ID does not exist" Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.555134 4877 scope.go:117] "RemoveContainer" containerID="1807775e33273d8c0f8bc61b54d27c177ad88225986b088f75ce44db10b8df25" Jan 28 17:32:09 crc kubenswrapper[4877]: E0128 17:32:09.555466 4877 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"1807775e33273d8c0f8bc61b54d27c177ad88225986b088f75ce44db10b8df25\": container with ID starting with 1807775e33273d8c0f8bc61b54d27c177ad88225986b088f75ce44db10b8df25 not found: ID does not exist" containerID="1807775e33273d8c0f8bc61b54d27c177ad88225986b088f75ce44db10b8df25" Jan 28 17:32:09 crc kubenswrapper[4877]: I0128 17:32:09.555560 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1807775e33273d8c0f8bc61b54d27c177ad88225986b088f75ce44db10b8df25"} err="failed to get container status \"1807775e33273d8c0f8bc61b54d27c177ad88225986b088f75ce44db10b8df25\": rpc error: code = NotFound desc = could not find container \"1807775e33273d8c0f8bc61b54d27c177ad88225986b088f75ce44db10b8df25\": container with ID starting with 1807775e33273d8c0f8bc61b54d27c177ad88225986b088f75ce44db10b8df25 not found: ID does not exist" Jan 28 17:32:11 crc kubenswrapper[4877]: I0128 17:32:11.351780 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08fbe9ca-309f-4362-9103-f62b983372ae" path="/var/lib/kubelet/pods/08fbe9ca-309f-4362-9103-f62b983372ae/volumes" Jan 28 17:32:37 crc kubenswrapper[4877]: I0128 17:32:37.076699 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:32:37 crc kubenswrapper[4877]: I0128 17:32:37.077771 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:32:37 crc kubenswrapper[4877]: I0128 17:32:37.078237 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 17:32:37 crc kubenswrapper[4877]: I0128 17:32:37.080117 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3dd86440232db0a12c7d2553820bdf67cd2b92550ff34d6442d63e0d796327d2"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 17:32:37 crc kubenswrapper[4877]: I0128 17:32:37.080252 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" containerID="cri-o://3dd86440232db0a12c7d2553820bdf67cd2b92550ff34d6442d63e0d796327d2" gracePeriod=600 Jan 28 17:32:37 crc kubenswrapper[4877]: I0128 17:32:37.790863 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="3dd86440232db0a12c7d2553820bdf67cd2b92550ff34d6442d63e0d796327d2" exitCode=0 Jan 28 17:32:37 crc kubenswrapper[4877]: I0128 17:32:37.791011 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" 
event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"3dd86440232db0a12c7d2553820bdf67cd2b92550ff34d6442d63e0d796327d2"} Jan 28 17:32:37 crc kubenswrapper[4877]: I0128 17:32:37.791552 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6"} Jan 28 17:32:37 crc kubenswrapper[4877]: I0128 17:32:37.791603 4877 scope.go:117] "RemoveContainer" containerID="c91c89a9121df6406220cddf637103db008ece5691791fa32d4c98d821ddb0b1" Jan 28 17:34:37 crc kubenswrapper[4877]: I0128 17:34:37.076493 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:34:37 crc kubenswrapper[4877]: I0128 17:34:37.077455 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:35:07 crc kubenswrapper[4877]: I0128 17:35:07.077061 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:35:07 crc kubenswrapper[4877]: I0128 17:35:07.078025 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:35:13 crc kubenswrapper[4877]: I0128 17:35:13.722876 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fmmnc"] Jan 28 17:35:13 crc kubenswrapper[4877]: E0128 17:35:13.724589 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08fbe9ca-309f-4362-9103-f62b983372ae" containerName="extract-content" Jan 28 17:35:13 crc kubenswrapper[4877]: I0128 17:35:13.724606 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="08fbe9ca-309f-4362-9103-f62b983372ae" containerName="extract-content" Jan 28 17:35:13 crc kubenswrapper[4877]: E0128 17:35:13.724629 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08fbe9ca-309f-4362-9103-f62b983372ae" containerName="registry-server" Jan 28 17:35:13 crc kubenswrapper[4877]: I0128 17:35:13.724635 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="08fbe9ca-309f-4362-9103-f62b983372ae" containerName="registry-server" Jan 28 17:35:13 crc kubenswrapper[4877]: E0128 17:35:13.724679 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08fbe9ca-309f-4362-9103-f62b983372ae" containerName="extract-utilities" Jan 28 17:35:13 crc kubenswrapper[4877]: I0128 17:35:13.724685 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="08fbe9ca-309f-4362-9103-f62b983372ae" containerName="extract-utilities" Jan 28 17:35:13 crc 
Jan 28 17:35:13 crc kubenswrapper[4877]: I0128 17:35:13.727551 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fmmnc"
Jan 28 17:35:13 crc kubenswrapper[4877]: I0128 17:35:13.760172 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fmmnc"]
Jan 28 17:35:13 crc kubenswrapper[4877]: I0128 17:35:13.807045 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/024c09cd-19bb-4019-b890-26239cef2362-catalog-content\") pod \"certified-operators-fmmnc\" (UID: \"024c09cd-19bb-4019-b890-26239cef2362\") " pod="openshift-marketplace/certified-operators-fmmnc"
Jan 28 17:35:13 crc kubenswrapper[4877]: I0128 17:35:13.807358 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79kfk\" (UniqueName: \"kubernetes.io/projected/024c09cd-19bb-4019-b890-26239cef2362-kube-api-access-79kfk\") pod \"certified-operators-fmmnc\" (UID: \"024c09cd-19bb-4019-b890-26239cef2362\") " pod="openshift-marketplace/certified-operators-fmmnc"
Jan 28 17:35:13 crc kubenswrapper[4877]: I0128 17:35:13.807461 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/024c09cd-19bb-4019-b890-26239cef2362-utilities\") pod \"certified-operators-fmmnc\" (UID: \"024c09cd-19bb-4019-b890-26239cef2362\") " pod="openshift-marketplace/certified-operators-fmmnc"
Jan 28 17:35:13 crc kubenswrapper[4877]: I0128 17:35:13.910162 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79kfk\" (UniqueName: \"kubernetes.io/projected/024c09cd-19bb-4019-b890-26239cef2362-kube-api-access-79kfk\") pod \"certified-operators-fmmnc\" (UID: \"024c09cd-19bb-4019-b890-26239cef2362\") " pod="openshift-marketplace/certified-operators-fmmnc"
Jan 28 17:35:13 crc kubenswrapper[4877]: I0128 17:35:13.910468 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/024c09cd-19bb-4019-b890-26239cef2362-utilities\") pod \"certified-operators-fmmnc\" (UID: \"024c09cd-19bb-4019-b890-26239cef2362\") " pod="openshift-marketplace/certified-operators-fmmnc"
Jan 28 17:35:13 crc kubenswrapper[4877]: I0128 17:35:13.910859 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/024c09cd-19bb-4019-b890-26239cef2362-catalog-content\") pod \"certified-operators-fmmnc\" (UID: \"024c09cd-19bb-4019-b890-26239cef2362\") " pod="openshift-marketplace/certified-operators-fmmnc"
Jan 28 17:35:13 crc kubenswrapper[4877]: I0128 17:35:13.911063 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/024c09cd-19bb-4019-b890-26239cef2362-utilities\") pod \"certified-operators-fmmnc\" (UID: \"024c09cd-19bb-4019-b890-26239cef2362\") " pod="openshift-marketplace/certified-operators-fmmnc"
Jan 28 17:35:13 crc kubenswrapper[4877]: I0128 17:35:13.911726 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/024c09cd-19bb-4019-b890-26239cef2362-catalog-content\") pod \"certified-operators-fmmnc\" (UID: \"024c09cd-19bb-4019-b890-26239cef2362\") " pod="openshift-marketplace/certified-operators-fmmnc"
\"kubernetes.io/empty-dir/024c09cd-19bb-4019-b890-26239cef2362-catalog-content\") pod \"certified-operators-fmmnc\" (UID: \"024c09cd-19bb-4019-b890-26239cef2362\") " pod="openshift-marketplace/certified-operators-fmmnc" Jan 28 17:35:13 crc kubenswrapper[4877]: I0128 17:35:13.938895 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79kfk\" (UniqueName: \"kubernetes.io/projected/024c09cd-19bb-4019-b890-26239cef2362-kube-api-access-79kfk\") pod \"certified-operators-fmmnc\" (UID: \"024c09cd-19bb-4019-b890-26239cef2362\") " pod="openshift-marketplace/certified-operators-fmmnc" Jan 28 17:35:14 crc kubenswrapper[4877]: I0128 17:35:14.052860 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fmmnc" Jan 28 17:35:14 crc kubenswrapper[4877]: I0128 17:35:14.786945 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fmmnc"] Jan 28 17:35:16 crc kubenswrapper[4877]: I0128 17:35:16.153946 4877 generic.go:334] "Generic (PLEG): container finished" podID="024c09cd-19bb-4019-b890-26239cef2362" containerID="ba784aece35a8ed63b4dc68a02435e7daa9e46ca8224d6442acf357ea66fb318" exitCode=0 Jan 28 17:35:16 crc kubenswrapper[4877]: I0128 17:35:16.154351 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fmmnc" event={"ID":"024c09cd-19bb-4019-b890-26239cef2362","Type":"ContainerDied","Data":"ba784aece35a8ed63b4dc68a02435e7daa9e46ca8224d6442acf357ea66fb318"} Jan 28 17:35:16 crc kubenswrapper[4877]: I0128 17:35:16.154389 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fmmnc" event={"ID":"024c09cd-19bb-4019-b890-26239cef2362","Type":"ContainerStarted","Data":"f13313c143925484211ada784629f891a2cb9981a513b9366efcc7868631eb1a"} Jan 28 17:35:18 crc kubenswrapper[4877]: I0128 17:35:18.186266 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fmmnc" event={"ID":"024c09cd-19bb-4019-b890-26239cef2362","Type":"ContainerStarted","Data":"66ca72f2c78127aef42d86bac1cfd37637a74618eb0b2507176d9c4cc368d167"} Jan 28 17:35:23 crc kubenswrapper[4877]: I0128 17:35:23.251833 4877 generic.go:334] "Generic (PLEG): container finished" podID="024c09cd-19bb-4019-b890-26239cef2362" containerID="66ca72f2c78127aef42d86bac1cfd37637a74618eb0b2507176d9c4cc368d167" exitCode=0 Jan 28 17:35:23 crc kubenswrapper[4877]: I0128 17:35:23.251910 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fmmnc" event={"ID":"024c09cd-19bb-4019-b890-26239cef2362","Type":"ContainerDied","Data":"66ca72f2c78127aef42d86bac1cfd37637a74618eb0b2507176d9c4cc368d167"} Jan 28 17:35:26 crc kubenswrapper[4877]: I0128 17:35:26.299016 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fmmnc" event={"ID":"024c09cd-19bb-4019-b890-26239cef2362","Type":"ContainerStarted","Data":"3ae00107d9efd946aa4f3010a53b79fba039fa04edf357e5d91c9417275587ec"} Jan 28 17:35:26 crc kubenswrapper[4877]: I0128 17:35:26.328195 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fmmnc" podStartSLOduration=4.178660953 podStartE2EDuration="13.328164308s" podCreationTimestamp="2026-01-28 17:35:13 +0000 UTC" firstStartedPulling="2026-01-28 17:35:16.156737424 +0000 UTC m=+3619.715064302" lastFinishedPulling="2026-01-28 17:35:25.306240769 
Jan 28 17:35:34 crc kubenswrapper[4877]: I0128 17:35:34.053711 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fmmnc"
Jan 28 17:35:34 crc kubenswrapper[4877]: I0128 17:35:34.054252 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fmmnc"
Jan 28 17:35:35 crc kubenswrapper[4877]: I0128 17:35:35.108951 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-fmmnc" podUID="024c09cd-19bb-4019-b890-26239cef2362" containerName="registry-server" probeResult="failure" output=<
Jan 28 17:35:35 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 17:35:35 crc kubenswrapper[4877]: >
Jan 28 17:35:37 crc kubenswrapper[4877]: I0128 17:35:37.076316 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 17:35:37 crc kubenswrapper[4877]: I0128 17:35:37.076889 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 17:35:37 crc kubenswrapper[4877]: I0128 17:35:37.076967 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm"
Jan 28 17:35:37 crc kubenswrapper[4877]: I0128 17:35:37.078488 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 17:35:37 crc kubenswrapper[4877]: I0128 17:35:37.078553 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" containerID="cri-o://285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" gracePeriod=600
Jan 28 17:35:37 crc kubenswrapper[4877]: I0128 17:35:37.441192 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" exitCode=0
Jan 28 17:35:37 crc kubenswrapper[4877]: I0128 17:35:37.441289 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6"}
Jan 28 17:35:37 crc kubenswrapper[4877]: I0128 17:35:37.441792 4877 scope.go:117] "RemoveContainer" containerID="3dd86440232db0a12c7d2553820bdf67cd2b92550ff34d6442d63e0d796327d2"
Jan 28 17:35:37 crc kubenswrapper[4877]: E0128 17:35:37.792305 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:35:38 crc kubenswrapper[4877]: I0128 17:35:38.459015 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6"
Jan 28 17:35:38 crc kubenswrapper[4877]: E0128 17:35:38.462251 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:35:45 crc kubenswrapper[4877]: I0128 17:35:45.116609 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-fmmnc" podUID="024c09cd-19bb-4019-b890-26239cef2362" containerName="registry-server" probeResult="failure" output=<
Jan 28 17:35:45 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 17:35:45 crc kubenswrapper[4877]: >
Jan 28 17:35:52 crc kubenswrapper[4877]: I0128 17:35:52.331566 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6"
Jan 28 17:35:52 crc kubenswrapper[4877]: E0128 17:35:52.332758 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:35:54 crc kubenswrapper[4877]: I0128 17:35:54.111626 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fmmnc"
Jan 28 17:35:54 crc kubenswrapper[4877]: I0128 17:35:54.166419 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fmmnc"
Jan 28 17:35:54 crc kubenswrapper[4877]: I0128 17:35:54.351161 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fmmnc"]
Jan 28 17:35:55 crc kubenswrapper[4877]: I0128 17:35:55.670805 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fmmnc" podUID="024c09cd-19bb-4019-b890-26239cef2362" containerName="registry-server" containerID="cri-o://3ae00107d9efd946aa4f3010a53b79fba039fa04edf357e5d91c9417275587ec" gracePeriod=2
Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.574519 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-22rj2"]
Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.578240 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-22rj2"
Need to start a new one" pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.595916 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-22rj2"] Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.684479 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c27ece6e-6784-46be-a3e3-eea250434aee-catalog-content\") pod \"community-operators-22rj2\" (UID: \"c27ece6e-6784-46be-a3e3-eea250434aee\") " pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.684582 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htttq\" (UniqueName: \"kubernetes.io/projected/c27ece6e-6784-46be-a3e3-eea250434aee-kube-api-access-htttq\") pod \"community-operators-22rj2\" (UID: \"c27ece6e-6784-46be-a3e3-eea250434aee\") " pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.684884 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c27ece6e-6784-46be-a3e3-eea250434aee-utilities\") pod \"community-operators-22rj2\" (UID: \"c27ece6e-6784-46be-a3e3-eea250434aee\") " pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.687332 4877 generic.go:334] "Generic (PLEG): container finished" podID="024c09cd-19bb-4019-b890-26239cef2362" containerID="3ae00107d9efd946aa4f3010a53b79fba039fa04edf357e5d91c9417275587ec" exitCode=0 Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.687373 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fmmnc" event={"ID":"024c09cd-19bb-4019-b890-26239cef2362","Type":"ContainerDied","Data":"3ae00107d9efd946aa4f3010a53b79fba039fa04edf357e5d91c9417275587ec"} Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.788175 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c27ece6e-6784-46be-a3e3-eea250434aee-catalog-content\") pod \"community-operators-22rj2\" (UID: \"c27ece6e-6784-46be-a3e3-eea250434aee\") " pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.788266 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htttq\" (UniqueName: \"kubernetes.io/projected/c27ece6e-6784-46be-a3e3-eea250434aee-kube-api-access-htttq\") pod \"community-operators-22rj2\" (UID: \"c27ece6e-6784-46be-a3e3-eea250434aee\") " pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.788338 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c27ece6e-6784-46be-a3e3-eea250434aee-utilities\") pod \"community-operators-22rj2\" (UID: \"c27ece6e-6784-46be-a3e3-eea250434aee\") " pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.788964 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c27ece6e-6784-46be-a3e3-eea250434aee-utilities\") pod \"community-operators-22rj2\" (UID: 
\"c27ece6e-6784-46be-a3e3-eea250434aee\") " pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.789231 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c27ece6e-6784-46be-a3e3-eea250434aee-catalog-content\") pod \"community-operators-22rj2\" (UID: \"c27ece6e-6784-46be-a3e3-eea250434aee\") " pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.815619 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htttq\" (UniqueName: \"kubernetes.io/projected/c27ece6e-6784-46be-a3e3-eea250434aee-kube-api-access-htttq\") pod \"community-operators-22rj2\" (UID: \"c27ece6e-6784-46be-a3e3-eea250434aee\") " pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.885884 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fmmnc" Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.915736 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.996139 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/024c09cd-19bb-4019-b890-26239cef2362-catalog-content\") pod \"024c09cd-19bb-4019-b890-26239cef2362\" (UID: \"024c09cd-19bb-4019-b890-26239cef2362\") " Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.996357 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79kfk\" (UniqueName: \"kubernetes.io/projected/024c09cd-19bb-4019-b890-26239cef2362-kube-api-access-79kfk\") pod \"024c09cd-19bb-4019-b890-26239cef2362\" (UID: \"024c09cd-19bb-4019-b890-26239cef2362\") " Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.996530 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/024c09cd-19bb-4019-b890-26239cef2362-utilities\") pod \"024c09cd-19bb-4019-b890-26239cef2362\" (UID: \"024c09cd-19bb-4019-b890-26239cef2362\") " Jan 28 17:35:56 crc kubenswrapper[4877]: I0128 17:35:56.997867 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/024c09cd-19bb-4019-b890-26239cef2362-utilities" (OuterVolumeSpecName: "utilities") pod "024c09cd-19bb-4019-b890-26239cef2362" (UID: "024c09cd-19bb-4019-b890-26239cef2362"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:35:57 crc kubenswrapper[4877]: I0128 17:35:57.017948 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/024c09cd-19bb-4019-b890-26239cef2362-kube-api-access-79kfk" (OuterVolumeSpecName: "kube-api-access-79kfk") pod "024c09cd-19bb-4019-b890-26239cef2362" (UID: "024c09cd-19bb-4019-b890-26239cef2362"). InnerVolumeSpecName "kube-api-access-79kfk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:35:57 crc kubenswrapper[4877]: I0128 17:35:57.073317 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/024c09cd-19bb-4019-b890-26239cef2362-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "024c09cd-19bb-4019-b890-26239cef2362" (UID: "024c09cd-19bb-4019-b890-26239cef2362"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:35:57 crc kubenswrapper[4877]: I0128 17:35:57.100567 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/024c09cd-19bb-4019-b890-26239cef2362-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:35:57 crc kubenswrapper[4877]: I0128 17:35:57.100611 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/024c09cd-19bb-4019-b890-26239cef2362-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:35:57 crc kubenswrapper[4877]: I0128 17:35:57.100633 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79kfk\" (UniqueName: \"kubernetes.io/projected/024c09cd-19bb-4019-b890-26239cef2362-kube-api-access-79kfk\") on node \"crc\" DevicePath \"\"" Jan 28 17:35:57 crc kubenswrapper[4877]: I0128 17:35:57.717669 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fmmnc" event={"ID":"024c09cd-19bb-4019-b890-26239cef2362","Type":"ContainerDied","Data":"f13313c143925484211ada784629f891a2cb9981a513b9366efcc7868631eb1a"} Jan 28 17:35:57 crc kubenswrapper[4877]: I0128 17:35:57.718244 4877 scope.go:117] "RemoveContainer" containerID="3ae00107d9efd946aa4f3010a53b79fba039fa04edf357e5d91c9417275587ec" Jan 28 17:35:57 crc kubenswrapper[4877]: I0128 17:35:57.717777 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fmmnc" Jan 28 17:35:57 crc kubenswrapper[4877]: I0128 17:35:57.757537 4877 scope.go:117] "RemoveContainer" containerID="66ca72f2c78127aef42d86bac1cfd37637a74618eb0b2507176d9c4cc368d167" Jan 28 17:35:57 crc kubenswrapper[4877]: I0128 17:35:57.760713 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fmmnc"] Jan 28 17:35:57 crc kubenswrapper[4877]: I0128 17:35:57.812655 4877 scope.go:117] "RemoveContainer" containerID="ba784aece35a8ed63b4dc68a02435e7daa9e46ca8224d6442acf357ea66fb318" Jan 28 17:35:57 crc kubenswrapper[4877]: I0128 17:35:57.837986 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fmmnc"] Jan 28 17:35:57 crc kubenswrapper[4877]: I0128 17:35:57.911858 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-22rj2"] Jan 28 17:35:58 crc kubenswrapper[4877]: I0128 17:35:58.746397 4877 generic.go:334] "Generic (PLEG): container finished" podID="c27ece6e-6784-46be-a3e3-eea250434aee" containerID="313b591f53e5b53fecd1bab4c103eb71fef8ba4cb5258929aaa27d887c201a22" exitCode=0 Jan 28 17:35:58 crc kubenswrapper[4877]: I0128 17:35:58.746889 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-22rj2" event={"ID":"c27ece6e-6784-46be-a3e3-eea250434aee","Type":"ContainerDied","Data":"313b591f53e5b53fecd1bab4c103eb71fef8ba4cb5258929aaa27d887c201a22"} Jan 28 17:35:58 crc kubenswrapper[4877]: I0128 17:35:58.746928 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-22rj2" event={"ID":"c27ece6e-6784-46be-a3e3-eea250434aee","Type":"ContainerStarted","Data":"5a544f46db917a42b8b2c973b237e40e025ebc571f35e46284cfb4b72b5678f3"} Jan 28 17:35:58 crc kubenswrapper[4877]: I0128 17:35:58.980983 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-njw7r"] Jan 28 17:35:58 crc kubenswrapper[4877]: E0128 17:35:58.982247 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="024c09cd-19bb-4019-b890-26239cef2362" containerName="extract-utilities" Jan 28 17:35:58 crc kubenswrapper[4877]: I0128 17:35:58.982278 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="024c09cd-19bb-4019-b890-26239cef2362" containerName="extract-utilities" Jan 28 17:35:58 crc kubenswrapper[4877]: E0128 17:35:58.982295 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="024c09cd-19bb-4019-b890-26239cef2362" containerName="registry-server" Jan 28 17:35:58 crc kubenswrapper[4877]: I0128 17:35:58.982304 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="024c09cd-19bb-4019-b890-26239cef2362" containerName="registry-server" Jan 28 17:35:58 crc kubenswrapper[4877]: E0128 17:35:58.982328 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="024c09cd-19bb-4019-b890-26239cef2362" containerName="extract-content" Jan 28 17:35:58 crc kubenswrapper[4877]: I0128 17:35:58.982337 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="024c09cd-19bb-4019-b890-26239cef2362" containerName="extract-content" Jan 28 17:35:58 crc kubenswrapper[4877]: I0128 17:35:58.982750 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="024c09cd-19bb-4019-b890-26239cef2362" containerName="registry-server" Jan 28 17:35:58 crc kubenswrapper[4877]: I0128 17:35:58.985566 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:35:59 crc kubenswrapper[4877]: I0128 17:35:59.000900 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-njw7r"] Jan 28 17:35:59 crc kubenswrapper[4877]: I0128 17:35:59.080622 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snhmk\" (UniqueName: \"kubernetes.io/projected/350827e3-165e-4a46-a35a-b3cb48e566ba-kube-api-access-snhmk\") pod \"redhat-operators-njw7r\" (UID: \"350827e3-165e-4a46-a35a-b3cb48e566ba\") " pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:35:59 crc kubenswrapper[4877]: I0128 17:35:59.081162 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/350827e3-165e-4a46-a35a-b3cb48e566ba-utilities\") pod \"redhat-operators-njw7r\" (UID: \"350827e3-165e-4a46-a35a-b3cb48e566ba\") " pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:35:59 crc kubenswrapper[4877]: I0128 17:35:59.081889 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/350827e3-165e-4a46-a35a-b3cb48e566ba-catalog-content\") pod \"redhat-operators-njw7r\" (UID: \"350827e3-165e-4a46-a35a-b3cb48e566ba\") " pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:35:59 crc kubenswrapper[4877]: I0128 17:35:59.185620 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snhmk\" (UniqueName: \"kubernetes.io/projected/350827e3-165e-4a46-a35a-b3cb48e566ba-kube-api-access-snhmk\") pod \"redhat-operators-njw7r\" (UID: \"350827e3-165e-4a46-a35a-b3cb48e566ba\") " pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:35:59 crc kubenswrapper[4877]: I0128 17:35:59.188384 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/350827e3-165e-4a46-a35a-b3cb48e566ba-utilities\") pod \"redhat-operators-njw7r\" (UID: \"350827e3-165e-4a46-a35a-b3cb48e566ba\") " pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:35:59 crc kubenswrapper[4877]: I0128 17:35:59.188992 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/350827e3-165e-4a46-a35a-b3cb48e566ba-catalog-content\") pod \"redhat-operators-njw7r\" (UID: \"350827e3-165e-4a46-a35a-b3cb48e566ba\") " pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:35:59 crc kubenswrapper[4877]: I0128 17:35:59.189924 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/350827e3-165e-4a46-a35a-b3cb48e566ba-catalog-content\") pod \"redhat-operators-njw7r\" (UID: \"350827e3-165e-4a46-a35a-b3cb48e566ba\") " pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:35:59 crc kubenswrapper[4877]: I0128 17:35:59.190528 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/350827e3-165e-4a46-a35a-b3cb48e566ba-utilities\") pod \"redhat-operators-njw7r\" (UID: \"350827e3-165e-4a46-a35a-b3cb48e566ba\") " pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:35:59 crc kubenswrapper[4877]: I0128 17:35:59.217540 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-snhmk\" (UniqueName: \"kubernetes.io/projected/350827e3-165e-4a46-a35a-b3cb48e566ba-kube-api-access-snhmk\") pod \"redhat-operators-njw7r\" (UID: \"350827e3-165e-4a46-a35a-b3cb48e566ba\") " pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:35:59 crc kubenswrapper[4877]: I0128 17:35:59.317142 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:35:59 crc kubenswrapper[4877]: I0128 17:35:59.390338 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="024c09cd-19bb-4019-b890-26239cef2362" path="/var/lib/kubelet/pods/024c09cd-19bb-4019-b890-26239cef2362/volumes" Jan 28 17:35:59 crc kubenswrapper[4877]: E0128 17:35:59.620972 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice/crio-f13313c143925484211ada784629f891a2cb9981a513b9366efcc7868631eb1a\": RecentStats: unable to find data in memory cache]" Jan 28 17:36:00 crc kubenswrapper[4877]: I0128 17:36:00.013070 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-njw7r"] Jan 28 17:36:00 crc kubenswrapper[4877]: W0128 17:36:00.031556 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod350827e3_165e_4a46_a35a_b3cb48e566ba.slice/crio-966349fd52d21c816bb5d54d717dd0fa833c2720c9569a6b508d626173309cf4 WatchSource:0}: Error finding container 966349fd52d21c816bb5d54d717dd0fa833c2720c9569a6b508d626173309cf4: Status 404 returned error can't find the container with id 966349fd52d21c816bb5d54d717dd0fa833c2720c9569a6b508d626173309cf4 Jan 28 17:36:00 crc kubenswrapper[4877]: I0128 17:36:00.791518 4877 generic.go:334] "Generic (PLEG): container finished" podID="350827e3-165e-4a46-a35a-b3cb48e566ba" containerID="fd369653e91817cc80c6d2ae306026e08a8e41fb848be323a5a582f42747d39f" exitCode=0 Jan 28 17:36:00 crc kubenswrapper[4877]: I0128 17:36:00.791652 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-njw7r" event={"ID":"350827e3-165e-4a46-a35a-b3cb48e566ba","Type":"ContainerDied","Data":"fd369653e91817cc80c6d2ae306026e08a8e41fb848be323a5a582f42747d39f"} Jan 28 17:36:00 crc kubenswrapper[4877]: I0128 17:36:00.792126 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-njw7r" event={"ID":"350827e3-165e-4a46-a35a-b3cb48e566ba","Type":"ContainerStarted","Data":"966349fd52d21c816bb5d54d717dd0fa833c2720c9569a6b508d626173309cf4"} Jan 28 17:36:00 crc kubenswrapper[4877]: I0128 17:36:00.802341 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-22rj2" event={"ID":"c27ece6e-6784-46be-a3e3-eea250434aee","Type":"ContainerStarted","Data":"bca243a81cebf21983142e6971ec9b212a206dfc181e7c49f5437525d0d5bbee"} Jan 28 17:36:01 crc kubenswrapper[4877]: I0128 17:36:01.818368 4877 generic.go:334] "Generic (PLEG): container finished" podID="c27ece6e-6784-46be-a3e3-eea250434aee" containerID="bca243a81cebf21983142e6971ec9b212a206dfc181e7c49f5437525d0d5bbee" exitCode=0 Jan 28 17:36:01 crc kubenswrapper[4877]: I0128 17:36:01.818838 4877 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-marketplace/community-operators-22rj2" event={"ID":"c27ece6e-6784-46be-a3e3-eea250434aee","Type":"ContainerDied","Data":"bca243a81cebf21983142e6971ec9b212a206dfc181e7c49f5437525d0d5bbee"} Jan 28 17:36:03 crc kubenswrapper[4877]: I0128 17:36:03.855732 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-22rj2" event={"ID":"c27ece6e-6784-46be-a3e3-eea250434aee","Type":"ContainerStarted","Data":"6f4c5e6abb28a88fb3b4a3e6a8d35abf187fcce82ecb9a4bda7097c0ded26242"} Jan 28 17:36:03 crc kubenswrapper[4877]: I0128 17:36:03.890780 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-22rj2" podStartSLOduration=3.282800517 podStartE2EDuration="7.890745957s" podCreationTimestamp="2026-01-28 17:35:56 +0000 UTC" firstStartedPulling="2026-01-28 17:35:58.750508296 +0000 UTC m=+3662.308835184" lastFinishedPulling="2026-01-28 17:36:03.358453726 +0000 UTC m=+3666.916780624" observedRunningTime="2026-01-28 17:36:03.878084065 +0000 UTC m=+3667.436410963" watchObservedRunningTime="2026-01-28 17:36:03.890745957 +0000 UTC m=+3667.449072845" Jan 28 17:36:04 crc kubenswrapper[4877]: I0128 17:36:04.879009 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-njw7r" event={"ID":"350827e3-165e-4a46-a35a-b3cb48e566ba","Type":"ContainerStarted","Data":"75b568bc4a95bc0e59d7380fdb46e227ff3b09bfaba8a268eb2dfa9a031e7d44"} Jan 28 17:36:05 crc kubenswrapper[4877]: E0128 17:36:05.286239 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice/crio-f13313c143925484211ada784629f891a2cb9981a513b9366efcc7868631eb1a\": RecentStats: unable to find data in memory cache]" Jan 28 17:36:06 crc kubenswrapper[4877]: I0128 17:36:06.916443 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:36:06 crc kubenswrapper[4877]: I0128 17:36:06.917014 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:36:06 crc kubenswrapper[4877]: I0128 17:36:06.988755 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:36:07 crc kubenswrapper[4877]: I0128 17:36:07.341312 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:36:07 crc kubenswrapper[4877]: E0128 17:36:07.341751 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:36:10 crc kubenswrapper[4877]: E0128 17:36:10.046131 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice/crio-f13313c143925484211ada784629f891a2cb9981a513b9366efcc7868631eb1a\": RecentStats: unable to find data in memory cache]" Jan 28 17:36:16 crc kubenswrapper[4877]: I0128 17:36:16.975037 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:36:17 crc kubenswrapper[4877]: I0128 17:36:17.063440 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-22rj2"] Jan 28 17:36:17 crc kubenswrapper[4877]: I0128 17:36:17.063842 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-22rj2" podUID="c27ece6e-6784-46be-a3e3-eea250434aee" containerName="registry-server" containerID="cri-o://6f4c5e6abb28a88fb3b4a3e6a8d35abf187fcce82ecb9a4bda7097c0ded26242" gracePeriod=2 Jan 28 17:36:17 crc kubenswrapper[4877]: I0128 17:36:17.680190 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:36:17 crc kubenswrapper[4877]: I0128 17:36:17.782391 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c27ece6e-6784-46be-a3e3-eea250434aee-utilities\") pod \"c27ece6e-6784-46be-a3e3-eea250434aee\" (UID: \"c27ece6e-6784-46be-a3e3-eea250434aee\") " Jan 28 17:36:17 crc kubenswrapper[4877]: I0128 17:36:17.782524 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htttq\" (UniqueName: \"kubernetes.io/projected/c27ece6e-6784-46be-a3e3-eea250434aee-kube-api-access-htttq\") pod \"c27ece6e-6784-46be-a3e3-eea250434aee\" (UID: \"c27ece6e-6784-46be-a3e3-eea250434aee\") " Jan 28 17:36:17 crc kubenswrapper[4877]: I0128 17:36:17.782642 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c27ece6e-6784-46be-a3e3-eea250434aee-catalog-content\") pod \"c27ece6e-6784-46be-a3e3-eea250434aee\" (UID: \"c27ece6e-6784-46be-a3e3-eea250434aee\") " Jan 28 17:36:17 crc kubenswrapper[4877]: I0128 17:36:17.783262 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c27ece6e-6784-46be-a3e3-eea250434aee-utilities" (OuterVolumeSpecName: "utilities") pod "c27ece6e-6784-46be-a3e3-eea250434aee" (UID: "c27ece6e-6784-46be-a3e3-eea250434aee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:36:17 crc kubenswrapper[4877]: I0128 17:36:17.788410 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c27ece6e-6784-46be-a3e3-eea250434aee-kube-api-access-htttq" (OuterVolumeSpecName: "kube-api-access-htttq") pod "c27ece6e-6784-46be-a3e3-eea250434aee" (UID: "c27ece6e-6784-46be-a3e3-eea250434aee"). InnerVolumeSpecName "kube-api-access-htttq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:36:17 crc kubenswrapper[4877]: I0128 17:36:17.788820 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c27ece6e-6784-46be-a3e3-eea250434aee-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:36:17 crc kubenswrapper[4877]: I0128 17:36:17.788862 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htttq\" (UniqueName: \"kubernetes.io/projected/c27ece6e-6784-46be-a3e3-eea250434aee-kube-api-access-htttq\") on node \"crc\" DevicePath \"\"" Jan 28 17:36:17 crc kubenswrapper[4877]: I0128 17:36:17.852875 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c27ece6e-6784-46be-a3e3-eea250434aee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c27ece6e-6784-46be-a3e3-eea250434aee" (UID: "c27ece6e-6784-46be-a3e3-eea250434aee"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:36:17 crc kubenswrapper[4877]: I0128 17:36:17.891547 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c27ece6e-6784-46be-a3e3-eea250434aee-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:36:18 crc kubenswrapper[4877]: I0128 17:36:18.064606 4877 generic.go:334] "Generic (PLEG): container finished" podID="c27ece6e-6784-46be-a3e3-eea250434aee" containerID="6f4c5e6abb28a88fb3b4a3e6a8d35abf187fcce82ecb9a4bda7097c0ded26242" exitCode=0 Jan 28 17:36:18 crc kubenswrapper[4877]: I0128 17:36:18.064653 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-22rj2" event={"ID":"c27ece6e-6784-46be-a3e3-eea250434aee","Type":"ContainerDied","Data":"6f4c5e6abb28a88fb3b4a3e6a8d35abf187fcce82ecb9a4bda7097c0ded26242"} Jan 28 17:36:18 crc kubenswrapper[4877]: I0128 17:36:18.064682 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-22rj2" event={"ID":"c27ece6e-6784-46be-a3e3-eea250434aee","Type":"ContainerDied","Data":"5a544f46db917a42b8b2c973b237e40e025ebc571f35e46284cfb4b72b5678f3"} Jan 28 17:36:18 crc kubenswrapper[4877]: I0128 17:36:18.064699 4877 scope.go:117] "RemoveContainer" containerID="6f4c5e6abb28a88fb3b4a3e6a8d35abf187fcce82ecb9a4bda7097c0ded26242" Jan 28 17:36:18 crc kubenswrapper[4877]: I0128 17:36:18.064767 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-22rj2" Jan 28 17:36:18 crc kubenswrapper[4877]: I0128 17:36:18.090304 4877 scope.go:117] "RemoveContainer" containerID="bca243a81cebf21983142e6971ec9b212a206dfc181e7c49f5437525d0d5bbee" Jan 28 17:36:18 crc kubenswrapper[4877]: I0128 17:36:18.119663 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-22rj2"] Jan 28 17:36:18 crc kubenswrapper[4877]: I0128 17:36:18.139267 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-22rj2"] Jan 28 17:36:18 crc kubenswrapper[4877]: I0128 17:36:18.142437 4877 scope.go:117] "RemoveContainer" containerID="313b591f53e5b53fecd1bab4c103eb71fef8ba4cb5258929aaa27d887c201a22" Jan 28 17:36:18 crc kubenswrapper[4877]: I0128 17:36:18.211443 4877 scope.go:117] "RemoveContainer" containerID="6f4c5e6abb28a88fb3b4a3e6a8d35abf187fcce82ecb9a4bda7097c0ded26242" Jan 28 17:36:18 crc kubenswrapper[4877]: E0128 17:36:18.212604 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f4c5e6abb28a88fb3b4a3e6a8d35abf187fcce82ecb9a4bda7097c0ded26242\": container with ID starting with 6f4c5e6abb28a88fb3b4a3e6a8d35abf187fcce82ecb9a4bda7097c0ded26242 not found: ID does not exist" containerID="6f4c5e6abb28a88fb3b4a3e6a8d35abf187fcce82ecb9a4bda7097c0ded26242" Jan 28 17:36:18 crc kubenswrapper[4877]: I0128 17:36:18.212667 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f4c5e6abb28a88fb3b4a3e6a8d35abf187fcce82ecb9a4bda7097c0ded26242"} err="failed to get container status \"6f4c5e6abb28a88fb3b4a3e6a8d35abf187fcce82ecb9a4bda7097c0ded26242\": rpc error: code = NotFound desc = could not find container \"6f4c5e6abb28a88fb3b4a3e6a8d35abf187fcce82ecb9a4bda7097c0ded26242\": container with ID starting with 6f4c5e6abb28a88fb3b4a3e6a8d35abf187fcce82ecb9a4bda7097c0ded26242 not found: ID does not exist" Jan 28 17:36:18 crc kubenswrapper[4877]: I0128 17:36:18.212696 4877 scope.go:117] "RemoveContainer" containerID="bca243a81cebf21983142e6971ec9b212a206dfc181e7c49f5437525d0d5bbee" Jan 28 17:36:18 crc kubenswrapper[4877]: E0128 17:36:18.213234 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bca243a81cebf21983142e6971ec9b212a206dfc181e7c49f5437525d0d5bbee\": container with ID starting with bca243a81cebf21983142e6971ec9b212a206dfc181e7c49f5437525d0d5bbee not found: ID does not exist" containerID="bca243a81cebf21983142e6971ec9b212a206dfc181e7c49f5437525d0d5bbee" Jan 28 17:36:18 crc kubenswrapper[4877]: I0128 17:36:18.213286 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bca243a81cebf21983142e6971ec9b212a206dfc181e7c49f5437525d0d5bbee"} err="failed to get container status \"bca243a81cebf21983142e6971ec9b212a206dfc181e7c49f5437525d0d5bbee\": rpc error: code = NotFound desc = could not find container \"bca243a81cebf21983142e6971ec9b212a206dfc181e7c49f5437525d0d5bbee\": container with ID starting with bca243a81cebf21983142e6971ec9b212a206dfc181e7c49f5437525d0d5bbee not found: ID does not exist" Jan 28 17:36:18 crc kubenswrapper[4877]: I0128 17:36:18.213315 4877 scope.go:117] "RemoveContainer" containerID="313b591f53e5b53fecd1bab4c103eb71fef8ba4cb5258929aaa27d887c201a22" Jan 28 17:36:18 crc kubenswrapper[4877]: E0128 17:36:18.213659 4877 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"313b591f53e5b53fecd1bab4c103eb71fef8ba4cb5258929aaa27d887c201a22\": container with ID starting with 313b591f53e5b53fecd1bab4c103eb71fef8ba4cb5258929aaa27d887c201a22 not found: ID does not exist" containerID="313b591f53e5b53fecd1bab4c103eb71fef8ba4cb5258929aaa27d887c201a22" Jan 28 17:36:18 crc kubenswrapper[4877]: I0128 17:36:18.213689 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"313b591f53e5b53fecd1bab4c103eb71fef8ba4cb5258929aaa27d887c201a22"} err="failed to get container status \"313b591f53e5b53fecd1bab4c103eb71fef8ba4cb5258929aaa27d887c201a22\": rpc error: code = NotFound desc = could not find container \"313b591f53e5b53fecd1bab4c103eb71fef8ba4cb5258929aaa27d887c201a22\": container with ID starting with 313b591f53e5b53fecd1bab4c103eb71fef8ba4cb5258929aaa27d887c201a22 not found: ID does not exist" Jan 28 17:36:19 crc kubenswrapper[4877]: I0128 17:36:19.331105 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:36:19 crc kubenswrapper[4877]: E0128 17:36:19.331703 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:36:19 crc kubenswrapper[4877]: I0128 17:36:19.348225 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c27ece6e-6784-46be-a3e3-eea250434aee" path="/var/lib/kubelet/pods/c27ece6e-6784-46be-a3e3-eea250434aee/volumes" Jan 28 17:36:20 crc kubenswrapper[4877]: I0128 17:36:20.097888 4877 generic.go:334] "Generic (PLEG): container finished" podID="350827e3-165e-4a46-a35a-b3cb48e566ba" containerID="75b568bc4a95bc0e59d7380fdb46e227ff3b09bfaba8a268eb2dfa9a031e7d44" exitCode=0 Jan 28 17:36:20 crc kubenswrapper[4877]: I0128 17:36:20.097959 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-njw7r" event={"ID":"350827e3-165e-4a46-a35a-b3cb48e566ba","Type":"ContainerDied","Data":"75b568bc4a95bc0e59d7380fdb46e227ff3b09bfaba8a268eb2dfa9a031e7d44"} Jan 28 17:36:20 crc kubenswrapper[4877]: E0128 17:36:20.364680 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice/crio-f13313c143925484211ada784629f891a2cb9981a513b9366efcc7868631eb1a\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice\": RecentStats: unable to find data in memory cache]" Jan 28 17:36:20 crc kubenswrapper[4877]: E0128 17:36:20.364689 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice/crio-f13313c143925484211ada784629f891a2cb9981a513b9366efcc7868631eb1a\": RecentStats: unable to find 
data in memory cache]" Jan 28 17:36:21 crc kubenswrapper[4877]: I0128 17:36:21.126759 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-njw7r" event={"ID":"350827e3-165e-4a46-a35a-b3cb48e566ba","Type":"ContainerStarted","Data":"fba8156fc4c376b25918feb44d02cae8eb3968d833197c3c45f8f8760f0fa7d2"} Jan 28 17:36:21 crc kubenswrapper[4877]: I0128 17:36:21.160613 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-njw7r" podStartSLOduration=4.253926315 podStartE2EDuration="23.160592371s" podCreationTimestamp="2026-01-28 17:35:58 +0000 UTC" firstStartedPulling="2026-01-28 17:36:01.822262934 +0000 UTC m=+3665.380589822" lastFinishedPulling="2026-01-28 17:36:20.72892899 +0000 UTC m=+3684.287255878" observedRunningTime="2026-01-28 17:36:21.151495405 +0000 UTC m=+3684.709822303" watchObservedRunningTime="2026-01-28 17:36:21.160592371 +0000 UTC m=+3684.718919259" Jan 28 17:36:29 crc kubenswrapper[4877]: I0128 17:36:29.318464 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:36:29 crc kubenswrapper[4877]: I0128 17:36:29.319058 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:36:30 crc kubenswrapper[4877]: I0128 17:36:30.330622 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:36:30 crc kubenswrapper[4877]: E0128 17:36:30.331179 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:36:30 crc kubenswrapper[4877]: I0128 17:36:30.383008 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-njw7r" podUID="350827e3-165e-4a46-a35a-b3cb48e566ba" containerName="registry-server" probeResult="failure" output=< Jan 28 17:36:30 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:36:30 crc kubenswrapper[4877]: > Jan 28 17:36:30 crc kubenswrapper[4877]: E0128 17:36:30.717050 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice/crio-f13313c143925484211ada784629f891a2cb9981a513b9366efcc7868631eb1a\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice\": RecentStats: unable to find data in memory cache]" Jan 28 17:36:35 crc kubenswrapper[4877]: E0128 17:36:35.282880 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice/crio-f13313c143925484211ada784629f891a2cb9981a513b9366efcc7868631eb1a\": RecentStats: unable to find 
data in memory cache]" Jan 28 17:36:40 crc kubenswrapper[4877]: I0128 17:36:40.388262 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-njw7r" podUID="350827e3-165e-4a46-a35a-b3cb48e566ba" containerName="registry-server" probeResult="failure" output=< Jan 28 17:36:40 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 17:36:40 crc kubenswrapper[4877]: > Jan 28 17:36:41 crc kubenswrapper[4877]: E0128 17:36:41.050622 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice/crio-f13313c143925484211ada784629f891a2cb9981a513b9366efcc7868631eb1a\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice\": RecentStats: unable to find data in memory cache]" Jan 28 17:36:45 crc kubenswrapper[4877]: I0128 17:36:45.330816 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:36:45 crc kubenswrapper[4877]: E0128 17:36:45.331755 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:36:48 crc kubenswrapper[4877]: E0128 17:36:48.300277 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice/crio-f13313c143925484211ada784629f891a2cb9981a513b9366efcc7868631eb1a\": RecentStats: unable to find data in memory cache]" Jan 28 17:36:48 crc kubenswrapper[4877]: E0128 17:36:48.301305 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice/crio-f13313c143925484211ada784629f891a2cb9981a513b9366efcc7868631eb1a\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice\": RecentStats: unable to find data in memory cache]" Jan 28 17:36:49 crc kubenswrapper[4877]: I0128 17:36:49.509348 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:36:49 crc kubenswrapper[4877]: I0128 17:36:49.576368 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:36:49 crc kubenswrapper[4877]: I0128 17:36:49.755891 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-njw7r"] Jan 28 17:36:50 crc kubenswrapper[4877]: E0128 17:36:50.279922 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice/crio-f13313c143925484211ada784629f891a2cb9981a513b9366efcc7868631eb1a\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice\": RecentStats: unable to find data in memory cache]" Jan 28 17:36:51 crc kubenswrapper[4877]: E0128 17:36:51.099188 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod024c09cd_19bb_4019_b890_26239cef2362.slice/crio-f13313c143925484211ada784629f891a2cb9981a513b9366efcc7868631eb1a\": RecentStats: unable to find data in memory cache]" Jan 28 17:36:51 crc kubenswrapper[4877]: I0128 17:36:51.527731 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-njw7r" podUID="350827e3-165e-4a46-a35a-b3cb48e566ba" containerName="registry-server" containerID="cri-o://fba8156fc4c376b25918feb44d02cae8eb3968d833197c3c45f8f8760f0fa7d2" gracePeriod=2 Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.191309 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.326464 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/350827e3-165e-4a46-a35a-b3cb48e566ba-catalog-content\") pod \"350827e3-165e-4a46-a35a-b3cb48e566ba\" (UID: \"350827e3-165e-4a46-a35a-b3cb48e566ba\") " Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.326589 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/350827e3-165e-4a46-a35a-b3cb48e566ba-utilities\") pod \"350827e3-165e-4a46-a35a-b3cb48e566ba\" (UID: \"350827e3-165e-4a46-a35a-b3cb48e566ba\") " Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.328230 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snhmk\" (UniqueName: \"kubernetes.io/projected/350827e3-165e-4a46-a35a-b3cb48e566ba-kube-api-access-snhmk\") pod \"350827e3-165e-4a46-a35a-b3cb48e566ba\" (UID: \"350827e3-165e-4a46-a35a-b3cb48e566ba\") " Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.331061 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/350827e3-165e-4a46-a35a-b3cb48e566ba-utilities" (OuterVolumeSpecName: "utilities") pod "350827e3-165e-4a46-a35a-b3cb48e566ba" (UID: "350827e3-165e-4a46-a35a-b3cb48e566ba"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.335248 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/350827e3-165e-4a46-a35a-b3cb48e566ba-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.338297 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/350827e3-165e-4a46-a35a-b3cb48e566ba-kube-api-access-snhmk" (OuterVolumeSpecName: "kube-api-access-snhmk") pod "350827e3-165e-4a46-a35a-b3cb48e566ba" (UID: "350827e3-165e-4a46-a35a-b3cb48e566ba"). InnerVolumeSpecName "kube-api-access-snhmk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.438592 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snhmk\" (UniqueName: \"kubernetes.io/projected/350827e3-165e-4a46-a35a-b3cb48e566ba-kube-api-access-snhmk\") on node \"crc\" DevicePath \"\"" Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.482522 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/350827e3-165e-4a46-a35a-b3cb48e566ba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "350827e3-165e-4a46-a35a-b3cb48e566ba" (UID: "350827e3-165e-4a46-a35a-b3cb48e566ba"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.541812 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/350827e3-165e-4a46-a35a-b3cb48e566ba-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.544631 4877 generic.go:334] "Generic (PLEG): container finished" podID="350827e3-165e-4a46-a35a-b3cb48e566ba" containerID="fba8156fc4c376b25918feb44d02cae8eb3968d833197c3c45f8f8760f0fa7d2" exitCode=0 Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.544756 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-njw7r" event={"ID":"350827e3-165e-4a46-a35a-b3cb48e566ba","Type":"ContainerDied","Data":"fba8156fc4c376b25918feb44d02cae8eb3968d833197c3c45f8f8760f0fa7d2"} Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.544815 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-njw7r" event={"ID":"350827e3-165e-4a46-a35a-b3cb48e566ba","Type":"ContainerDied","Data":"966349fd52d21c816bb5d54d717dd0fa833c2720c9569a6b508d626173309cf4"} Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.544838 4877 scope.go:117] "RemoveContainer" containerID="fba8156fc4c376b25918feb44d02cae8eb3968d833197c3c45f8f8760f0fa7d2" Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.545076 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-njw7r" Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.589692 4877 scope.go:117] "RemoveContainer" containerID="75b568bc4a95bc0e59d7380fdb46e227ff3b09bfaba8a268eb2dfa9a031e7d44" Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.596012 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-njw7r"] Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.606701 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-njw7r"] Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.641228 4877 scope.go:117] "RemoveContainer" containerID="fd369653e91817cc80c6d2ae306026e08a8e41fb848be323a5a582f42747d39f" Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.707745 4877 scope.go:117] "RemoveContainer" containerID="fba8156fc4c376b25918feb44d02cae8eb3968d833197c3c45f8f8760f0fa7d2" Jan 28 17:36:52 crc kubenswrapper[4877]: E0128 17:36:52.708541 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fba8156fc4c376b25918feb44d02cae8eb3968d833197c3c45f8f8760f0fa7d2\": container with ID starting with fba8156fc4c376b25918feb44d02cae8eb3968d833197c3c45f8f8760f0fa7d2 not found: ID does not exist" containerID="fba8156fc4c376b25918feb44d02cae8eb3968d833197c3c45f8f8760f0fa7d2" Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.708600 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fba8156fc4c376b25918feb44d02cae8eb3968d833197c3c45f8f8760f0fa7d2"} err="failed to get container status \"fba8156fc4c376b25918feb44d02cae8eb3968d833197c3c45f8f8760f0fa7d2\": rpc error: code = NotFound desc = could not find container \"fba8156fc4c376b25918feb44d02cae8eb3968d833197c3c45f8f8760f0fa7d2\": container with ID starting with fba8156fc4c376b25918feb44d02cae8eb3968d833197c3c45f8f8760f0fa7d2 not found: ID does not exist" Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.708632 4877 scope.go:117] "RemoveContainer" containerID="75b568bc4a95bc0e59d7380fdb46e227ff3b09bfaba8a268eb2dfa9a031e7d44" Jan 28 17:36:52 crc kubenswrapper[4877]: E0128 17:36:52.709227 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75b568bc4a95bc0e59d7380fdb46e227ff3b09bfaba8a268eb2dfa9a031e7d44\": container with ID starting with 75b568bc4a95bc0e59d7380fdb46e227ff3b09bfaba8a268eb2dfa9a031e7d44 not found: ID does not exist" containerID="75b568bc4a95bc0e59d7380fdb46e227ff3b09bfaba8a268eb2dfa9a031e7d44" Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.709257 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75b568bc4a95bc0e59d7380fdb46e227ff3b09bfaba8a268eb2dfa9a031e7d44"} err="failed to get container status \"75b568bc4a95bc0e59d7380fdb46e227ff3b09bfaba8a268eb2dfa9a031e7d44\": rpc error: code = NotFound desc = could not find container \"75b568bc4a95bc0e59d7380fdb46e227ff3b09bfaba8a268eb2dfa9a031e7d44\": container with ID starting with 75b568bc4a95bc0e59d7380fdb46e227ff3b09bfaba8a268eb2dfa9a031e7d44 not found: ID does not exist" Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.709277 4877 scope.go:117] "RemoveContainer" containerID="fd369653e91817cc80c6d2ae306026e08a8e41fb848be323a5a582f42747d39f" Jan 28 17:36:52 crc kubenswrapper[4877]: E0128 17:36:52.709880 4877 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"fd369653e91817cc80c6d2ae306026e08a8e41fb848be323a5a582f42747d39f\": container with ID starting with fd369653e91817cc80c6d2ae306026e08a8e41fb848be323a5a582f42747d39f not found: ID does not exist" containerID="fd369653e91817cc80c6d2ae306026e08a8e41fb848be323a5a582f42747d39f" Jan 28 17:36:52 crc kubenswrapper[4877]: I0128 17:36:52.709911 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd369653e91817cc80c6d2ae306026e08a8e41fb848be323a5a582f42747d39f"} err="failed to get container status \"fd369653e91817cc80c6d2ae306026e08a8e41fb848be323a5a582f42747d39f\": rpc error: code = NotFound desc = could not find container \"fd369653e91817cc80c6d2ae306026e08a8e41fb848be323a5a582f42747d39f\": container with ID starting with fd369653e91817cc80c6d2ae306026e08a8e41fb848be323a5a582f42747d39f not found: ID does not exist" Jan 28 17:36:53 crc kubenswrapper[4877]: I0128 17:36:53.345816 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="350827e3-165e-4a46-a35a-b3cb48e566ba" path="/var/lib/kubelet/pods/350827e3-165e-4a46-a35a-b3cb48e566ba/volumes" Jan 28 17:36:57 crc kubenswrapper[4877]: I0128 17:36:57.341552 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:36:57 crc kubenswrapper[4877]: E0128 17:36:57.342326 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:37:12 crc kubenswrapper[4877]: I0128 17:37:12.330737 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:37:12 crc kubenswrapper[4877]: E0128 17:37:12.331664 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:37:26 crc kubenswrapper[4877]: I0128 17:37:26.330522 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:37:26 crc kubenswrapper[4877]: E0128 17:37:26.331292 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:37:40 crc kubenswrapper[4877]: I0128 17:37:40.331904 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:37:40 crc kubenswrapper[4877]: E0128 17:37:40.332945 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:37:52 crc kubenswrapper[4877]: I0128 17:37:52.331046 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:37:52 crc kubenswrapper[4877]: E0128 17:37:52.331842 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:38:03 crc kubenswrapper[4877]: I0128 17:38:03.331844 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:38:03 crc kubenswrapper[4877]: E0128 17:38:03.333516 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:38:16 crc kubenswrapper[4877]: I0128 17:38:16.330578 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:38:16 crc kubenswrapper[4877]: E0128 17:38:16.333355 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:38:28 crc kubenswrapper[4877]: I0128 17:38:28.331545 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:38:28 crc kubenswrapper[4877]: E0128 17:38:28.333821 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:38:41 crc kubenswrapper[4877]: I0128 17:38:41.338740 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:38:41 crc kubenswrapper[4877]: E0128 17:38:41.343158 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:38:54 crc kubenswrapper[4877]: I0128 17:38:54.332167 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:38:54 crc kubenswrapper[4877]: E0128 17:38:54.334915 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:39:07 crc kubenswrapper[4877]: I0128 17:39:07.352635 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:39:07 crc kubenswrapper[4877]: E0128 17:39:07.353943 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:39:22 crc kubenswrapper[4877]: I0128 17:39:22.331317 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:39:22 crc kubenswrapper[4877]: E0128 17:39:22.332389 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:39:33 crc kubenswrapper[4877]: I0128 17:39:33.330875 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:39:33 crc kubenswrapper[4877]: E0128 17:39:33.331716 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:39:48 crc kubenswrapper[4877]: I0128 17:39:48.330814 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:39:48 crc kubenswrapper[4877]: E0128 17:39:48.333374 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" 
podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:40:02 crc kubenswrapper[4877]: I0128 17:40:02.331886 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:40:02 crc kubenswrapper[4877]: E0128 17:40:02.333726 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:40:14 crc kubenswrapper[4877]: I0128 17:40:14.331442 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:40:14 crc kubenswrapper[4877]: E0128 17:40:14.332413 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:40:27 crc kubenswrapper[4877]: I0128 17:40:27.343082 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:40:27 crc kubenswrapper[4877]: E0128 17:40:27.344548 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:40:39 crc kubenswrapper[4877]: I0128 17:40:39.332854 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6" Jan 28 17:40:40 crc kubenswrapper[4877]: I0128 17:40:40.558506 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"637c7a46fd451240eeb80bc0e7638e276252f9e4d1c404e0e56502e717e91fdb"} Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.250240 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mkwhz"] Jan 28 17:41:56 crc kubenswrapper[4877]: E0128 17:41:56.252353 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="350827e3-165e-4a46-a35a-b3cb48e566ba" containerName="extract-utilities" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.252440 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="350827e3-165e-4a46-a35a-b3cb48e566ba" containerName="extract-utilities" Jan 28 17:41:56 crc kubenswrapper[4877]: E0128 17:41:56.252544 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c27ece6e-6784-46be-a3e3-eea250434aee" containerName="registry-server" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.252622 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="c27ece6e-6784-46be-a3e3-eea250434aee" containerName="registry-server" Jan 28 
17:41:56 crc kubenswrapper[4877]: E0128 17:41:56.252702 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="350827e3-165e-4a46-a35a-b3cb48e566ba" containerName="registry-server" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.252755 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="350827e3-165e-4a46-a35a-b3cb48e566ba" containerName="registry-server" Jan 28 17:41:56 crc kubenswrapper[4877]: E0128 17:41:56.252816 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c27ece6e-6784-46be-a3e3-eea250434aee" containerName="extract-utilities" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.252869 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="c27ece6e-6784-46be-a3e3-eea250434aee" containerName="extract-utilities" Jan 28 17:41:56 crc kubenswrapper[4877]: E0128 17:41:56.252954 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c27ece6e-6784-46be-a3e3-eea250434aee" containerName="extract-content" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.253011 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="c27ece6e-6784-46be-a3e3-eea250434aee" containerName="extract-content" Jan 28 17:41:56 crc kubenswrapper[4877]: E0128 17:41:56.253097 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="350827e3-165e-4a46-a35a-b3cb48e566ba" containerName="extract-content" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.253154 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="350827e3-165e-4a46-a35a-b3cb48e566ba" containerName="extract-content" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.253488 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="c27ece6e-6784-46be-a3e3-eea250434aee" containerName="registry-server" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.253562 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="350827e3-165e-4a46-a35a-b3cb48e566ba" containerName="registry-server" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.255403 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mkwhz" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.268229 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mkwhz"] Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.434517 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgsbl\" (UniqueName: \"kubernetes.io/projected/8291c5ff-f694-4f09-8836-88f0b70ac943-kube-api-access-hgsbl\") pod \"redhat-marketplace-mkwhz\" (UID: \"8291c5ff-f694-4f09-8836-88f0b70ac943\") " pod="openshift-marketplace/redhat-marketplace-mkwhz" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.434579 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8291c5ff-f694-4f09-8836-88f0b70ac943-catalog-content\") pod \"redhat-marketplace-mkwhz\" (UID: \"8291c5ff-f694-4f09-8836-88f0b70ac943\") " pod="openshift-marketplace/redhat-marketplace-mkwhz" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.434786 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8291c5ff-f694-4f09-8836-88f0b70ac943-utilities\") pod \"redhat-marketplace-mkwhz\" (UID: \"8291c5ff-f694-4f09-8836-88f0b70ac943\") " pod="openshift-marketplace/redhat-marketplace-mkwhz" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.538688 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8291c5ff-f694-4f09-8836-88f0b70ac943-catalog-content\") pod \"redhat-marketplace-mkwhz\" (UID: \"8291c5ff-f694-4f09-8836-88f0b70ac943\") " pod="openshift-marketplace/redhat-marketplace-mkwhz" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.539073 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8291c5ff-f694-4f09-8836-88f0b70ac943-utilities\") pod \"redhat-marketplace-mkwhz\" (UID: \"8291c5ff-f694-4f09-8836-88f0b70ac943\") " pod="openshift-marketplace/redhat-marketplace-mkwhz" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.539256 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgsbl\" (UniqueName: \"kubernetes.io/projected/8291c5ff-f694-4f09-8836-88f0b70ac943-kube-api-access-hgsbl\") pod \"redhat-marketplace-mkwhz\" (UID: \"8291c5ff-f694-4f09-8836-88f0b70ac943\") " pod="openshift-marketplace/redhat-marketplace-mkwhz" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.540645 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8291c5ff-f694-4f09-8836-88f0b70ac943-catalog-content\") pod \"redhat-marketplace-mkwhz\" (UID: \"8291c5ff-f694-4f09-8836-88f0b70ac943\") " pod="openshift-marketplace/redhat-marketplace-mkwhz" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.541452 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8291c5ff-f694-4f09-8836-88f0b70ac943-utilities\") pod \"redhat-marketplace-mkwhz\" (UID: \"8291c5ff-f694-4f09-8836-88f0b70ac943\") " pod="openshift-marketplace/redhat-marketplace-mkwhz" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.567144 4877 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-hgsbl\" (UniqueName: \"kubernetes.io/projected/8291c5ff-f694-4f09-8836-88f0b70ac943-kube-api-access-hgsbl\") pod \"redhat-marketplace-mkwhz\" (UID: \"8291c5ff-f694-4f09-8836-88f0b70ac943\") " pod="openshift-marketplace/redhat-marketplace-mkwhz" Jan 28 17:41:56 crc kubenswrapper[4877]: I0128 17:41:56.589001 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mkwhz" Jan 28 17:41:57 crc kubenswrapper[4877]: I0128 17:41:57.258039 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mkwhz"] Jan 28 17:41:57 crc kubenswrapper[4877]: I0128 17:41:57.581857 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mkwhz" event={"ID":"8291c5ff-f694-4f09-8836-88f0b70ac943","Type":"ContainerStarted","Data":"ca8e731d45486e77652b34fea57ad85eeff42adbb241edbfe00cb8003eeeea5a"} Jan 28 17:41:58 crc kubenswrapper[4877]: I0128 17:41:58.598979 4877 generic.go:334] "Generic (PLEG): container finished" podID="8291c5ff-f694-4f09-8836-88f0b70ac943" containerID="9b44173ba760114fe00ca7e624dce9b41e73afe88c764891d39958594f84a479" exitCode=0 Jan 28 17:41:58 crc kubenswrapper[4877]: I0128 17:41:58.599130 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mkwhz" event={"ID":"8291c5ff-f694-4f09-8836-88f0b70ac943","Type":"ContainerDied","Data":"9b44173ba760114fe00ca7e624dce9b41e73afe88c764891d39958594f84a479"} Jan 28 17:41:58 crc kubenswrapper[4877]: I0128 17:41:58.602122 4877 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 17:42:00 crc kubenswrapper[4877]: I0128 17:42:00.628612 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mkwhz" event={"ID":"8291c5ff-f694-4f09-8836-88f0b70ac943","Type":"ContainerStarted","Data":"07b900bc2feedd79478e5452757d7bc43a0300fb4c58aef3ec2304a4d89f79a2"} Jan 28 17:42:01 crc kubenswrapper[4877]: I0128 17:42:01.653800 4877 generic.go:334] "Generic (PLEG): container finished" podID="8291c5ff-f694-4f09-8836-88f0b70ac943" containerID="07b900bc2feedd79478e5452757d7bc43a0300fb4c58aef3ec2304a4d89f79a2" exitCode=0 Jan 28 17:42:01 crc kubenswrapper[4877]: I0128 17:42:01.654358 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mkwhz" event={"ID":"8291c5ff-f694-4f09-8836-88f0b70ac943","Type":"ContainerDied","Data":"07b900bc2feedd79478e5452757d7bc43a0300fb4c58aef3ec2304a4d89f79a2"} Jan 28 17:42:02 crc kubenswrapper[4877]: I0128 17:42:02.682105 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mkwhz" event={"ID":"8291c5ff-f694-4f09-8836-88f0b70ac943","Type":"ContainerStarted","Data":"9313e1b5ddd6b93ebe5462b3033673f1ed5218295bbb23eb3f5d278fb2fd0841"} Jan 28 17:42:02 crc kubenswrapper[4877]: I0128 17:42:02.704310 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mkwhz" podStartSLOduration=3.171347607 podStartE2EDuration="6.704283907s" podCreationTimestamp="2026-01-28 17:41:56 +0000 UTC" firstStartedPulling="2026-01-28 17:41:58.601658458 +0000 UTC m=+4022.159985346" lastFinishedPulling="2026-01-28 17:42:02.134594758 +0000 UTC m=+4025.692921646" observedRunningTime="2026-01-28 17:42:02.70032612 +0000 UTC m=+4026.258653018" watchObservedRunningTime="2026-01-28 17:42:02.704283907 +0000 UTC 
m=+4026.262610795"
Jan 28 17:42:06 crc kubenswrapper[4877]: I0128 17:42:06.589398 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mkwhz"
Jan 28 17:42:06 crc kubenswrapper[4877]: I0128 17:42:06.592051 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mkwhz"
Jan 28 17:42:06 crc kubenswrapper[4877]: I0128 17:42:06.661453 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mkwhz"
Jan 28 17:42:16 crc kubenswrapper[4877]: I0128 17:42:16.650623 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mkwhz"
Jan 28 17:42:16 crc kubenswrapper[4877]: I0128 17:42:16.717843 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mkwhz"]
Jan 28 17:42:16 crc kubenswrapper[4877]: I0128 17:42:16.883101 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mkwhz" podUID="8291c5ff-f694-4f09-8836-88f0b70ac943" containerName="registry-server" containerID="cri-o://9313e1b5ddd6b93ebe5462b3033673f1ed5218295bbb23eb3f5d278fb2fd0841" gracePeriod=2
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.632291 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mkwhz"
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.746161 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8291c5ff-f694-4f09-8836-88f0b70ac943-utilities\") pod \"8291c5ff-f694-4f09-8836-88f0b70ac943\" (UID: \"8291c5ff-f694-4f09-8836-88f0b70ac943\") "
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.746775 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hgsbl\" (UniqueName: \"kubernetes.io/projected/8291c5ff-f694-4f09-8836-88f0b70ac943-kube-api-access-hgsbl\") pod \"8291c5ff-f694-4f09-8836-88f0b70ac943\" (UID: \"8291c5ff-f694-4f09-8836-88f0b70ac943\") "
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.746876 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8291c5ff-f694-4f09-8836-88f0b70ac943-catalog-content\") pod \"8291c5ff-f694-4f09-8836-88f0b70ac943\" (UID: \"8291c5ff-f694-4f09-8836-88f0b70ac943\") "
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.749787 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8291c5ff-f694-4f09-8836-88f0b70ac943-utilities" (OuterVolumeSpecName: "utilities") pod "8291c5ff-f694-4f09-8836-88f0b70ac943" (UID: "8291c5ff-f694-4f09-8836-88f0b70ac943"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.758723 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8291c5ff-f694-4f09-8836-88f0b70ac943-kube-api-access-hgsbl" (OuterVolumeSpecName: "kube-api-access-hgsbl") pod "8291c5ff-f694-4f09-8836-88f0b70ac943" (UID: "8291c5ff-f694-4f09-8836-88f0b70ac943"). InnerVolumeSpecName "kube-api-access-hgsbl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.773584 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8291c5ff-f694-4f09-8836-88f0b70ac943-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8291c5ff-f694-4f09-8836-88f0b70ac943" (UID: "8291c5ff-f694-4f09-8836-88f0b70ac943"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.849676 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8291c5ff-f694-4f09-8836-88f0b70ac943-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.849733 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hgsbl\" (UniqueName: \"kubernetes.io/projected/8291c5ff-f694-4f09-8836-88f0b70ac943-kube-api-access-hgsbl\") on node \"crc\" DevicePath \"\""
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.849746 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8291c5ff-f694-4f09-8836-88f0b70ac943-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.899517 4877 generic.go:334] "Generic (PLEG): container finished" podID="8291c5ff-f694-4f09-8836-88f0b70ac943" containerID="9313e1b5ddd6b93ebe5462b3033673f1ed5218295bbb23eb3f5d278fb2fd0841" exitCode=0
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.899583 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mkwhz" event={"ID":"8291c5ff-f694-4f09-8836-88f0b70ac943","Type":"ContainerDied","Data":"9313e1b5ddd6b93ebe5462b3033673f1ed5218295bbb23eb3f5d278fb2fd0841"}
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.899624 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mkwhz" event={"ID":"8291c5ff-f694-4f09-8836-88f0b70ac943","Type":"ContainerDied","Data":"ca8e731d45486e77652b34fea57ad85eeff42adbb241edbfe00cb8003eeeea5a"}
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.899648 4877 scope.go:117] "RemoveContainer" containerID="9313e1b5ddd6b93ebe5462b3033673f1ed5218295bbb23eb3f5d278fb2fd0841"
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.899661 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mkwhz"
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.945092 4877 scope.go:117] "RemoveContainer" containerID="07b900bc2feedd79478e5452757d7bc43a0300fb4c58aef3ec2304a4d89f79a2"
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.954544 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mkwhz"]
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.970000 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mkwhz"]
Jan 28 17:42:17 crc kubenswrapper[4877]: I0128 17:42:17.979123 4877 scope.go:117] "RemoveContainer" containerID="9b44173ba760114fe00ca7e624dce9b41e73afe88c764891d39958594f84a479"
Jan 28 17:42:18 crc kubenswrapper[4877]: I0128 17:42:18.043504 4877 scope.go:117] "RemoveContainer" containerID="9313e1b5ddd6b93ebe5462b3033673f1ed5218295bbb23eb3f5d278fb2fd0841"
Jan 28 17:42:18 crc kubenswrapper[4877]: E0128 17:42:18.044104 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9313e1b5ddd6b93ebe5462b3033673f1ed5218295bbb23eb3f5d278fb2fd0841\": container with ID starting with 9313e1b5ddd6b93ebe5462b3033673f1ed5218295bbb23eb3f5d278fb2fd0841 not found: ID does not exist" containerID="9313e1b5ddd6b93ebe5462b3033673f1ed5218295bbb23eb3f5d278fb2fd0841"
Jan 28 17:42:18 crc kubenswrapper[4877]: I0128 17:42:18.044141 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9313e1b5ddd6b93ebe5462b3033673f1ed5218295bbb23eb3f5d278fb2fd0841"} err="failed to get container status \"9313e1b5ddd6b93ebe5462b3033673f1ed5218295bbb23eb3f5d278fb2fd0841\": rpc error: code = NotFound desc = could not find container \"9313e1b5ddd6b93ebe5462b3033673f1ed5218295bbb23eb3f5d278fb2fd0841\": container with ID starting with 9313e1b5ddd6b93ebe5462b3033673f1ed5218295bbb23eb3f5d278fb2fd0841 not found: ID does not exist"
Jan 28 17:42:18 crc kubenswrapper[4877]: I0128 17:42:18.044167 4877 scope.go:117] "RemoveContainer" containerID="07b900bc2feedd79478e5452757d7bc43a0300fb4c58aef3ec2304a4d89f79a2"
Jan 28 17:42:18 crc kubenswrapper[4877]: E0128 17:42:18.044426 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07b900bc2feedd79478e5452757d7bc43a0300fb4c58aef3ec2304a4d89f79a2\": container with ID starting with 07b900bc2feedd79478e5452757d7bc43a0300fb4c58aef3ec2304a4d89f79a2 not found: ID does not exist" containerID="07b900bc2feedd79478e5452757d7bc43a0300fb4c58aef3ec2304a4d89f79a2"
Jan 28 17:42:18 crc kubenswrapper[4877]: I0128 17:42:18.044447 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07b900bc2feedd79478e5452757d7bc43a0300fb4c58aef3ec2304a4d89f79a2"} err="failed to get container status \"07b900bc2feedd79478e5452757d7bc43a0300fb4c58aef3ec2304a4d89f79a2\": rpc error: code = NotFound desc = could not find container \"07b900bc2feedd79478e5452757d7bc43a0300fb4c58aef3ec2304a4d89f79a2\": container with ID starting with 07b900bc2feedd79478e5452757d7bc43a0300fb4c58aef3ec2304a4d89f79a2 not found: ID does not exist"
Jan 28 17:42:18 crc kubenswrapper[4877]: I0128 17:42:18.044465 4877 scope.go:117] "RemoveContainer" containerID="9b44173ba760114fe00ca7e624dce9b41e73afe88c764891d39958594f84a479"
Jan 28 17:42:18 crc kubenswrapper[4877]: E0128 17:42:18.044718 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b44173ba760114fe00ca7e624dce9b41e73afe88c764891d39958594f84a479\": container with ID starting with 9b44173ba760114fe00ca7e624dce9b41e73afe88c764891d39958594f84a479 not found: ID does not exist" containerID="9b44173ba760114fe00ca7e624dce9b41e73afe88c764891d39958594f84a479"
Jan 28 17:42:18 crc kubenswrapper[4877]: I0128 17:42:18.044739 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b44173ba760114fe00ca7e624dce9b41e73afe88c764891d39958594f84a479"} err="failed to get container status \"9b44173ba760114fe00ca7e624dce9b41e73afe88c764891d39958594f84a479\": rpc error: code = NotFound desc = could not find container \"9b44173ba760114fe00ca7e624dce9b41e73afe88c764891d39958594f84a479\": container with ID starting with 9b44173ba760114fe00ca7e624dce9b41e73afe88c764891d39958594f84a479 not found: ID does not exist"
Jan 28 17:42:19 crc kubenswrapper[4877]: I0128 17:42:19.363022 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8291c5ff-f694-4f09-8836-88f0b70ac943" path="/var/lib/kubelet/pods/8291c5ff-f694-4f09-8836-88f0b70ac943/volumes"
Jan 28 17:43:07 crc kubenswrapper[4877]: I0128 17:43:07.076895 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 17:43:07 crc kubenswrapper[4877]: I0128 17:43:07.077971 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 17:43:37 crc kubenswrapper[4877]: I0128 17:43:37.077075 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 17:43:37 crc kubenswrapper[4877]: I0128 17:43:37.078105 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 17:44:07 crc kubenswrapper[4877]: I0128 17:44:07.076870 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 17:44:07 crc kubenswrapper[4877]: I0128 17:44:07.077865 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 17:44:07 crc kubenswrapper[4877]: I0128 17:44:07.077934 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm"
Jan 28 17:44:07 crc kubenswrapper[4877]: I0128 17:44:07.079138 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"637c7a46fd451240eeb80bc0e7638e276252f9e4d1c404e0e56502e717e91fdb"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 17:44:07 crc kubenswrapper[4877]: I0128 17:44:07.079229 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" containerID="cri-o://637c7a46fd451240eeb80bc0e7638e276252f9e4d1c404e0e56502e717e91fdb" gracePeriod=600
Jan 28 17:44:08 crc kubenswrapper[4877]: I0128 17:44:08.226573 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="637c7a46fd451240eeb80bc0e7638e276252f9e4d1c404e0e56502e717e91fdb" exitCode=0
Jan 28 17:44:08 crc kubenswrapper[4877]: I0128 17:44:08.226670 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"637c7a46fd451240eeb80bc0e7638e276252f9e4d1c404e0e56502e717e91fdb"}
Jan 28 17:44:08 crc kubenswrapper[4877]: I0128 17:44:08.227192 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b"}
Jan 28 17:44:08 crc kubenswrapper[4877]: I0128 17:44:08.227226 4877 scope.go:117] "RemoveContainer" containerID="285d7a06a05bfa3b4a6979453a752cc1bfaef0fdd23ef3d5f74473ca992f56c6"
Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.254521 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br"]
Jan 28 17:45:00 crc kubenswrapper[4877]: E0128 17:45:00.256602 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8291c5ff-f694-4f09-8836-88f0b70ac943" containerName="extract-utilities"
Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.256628 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="8291c5ff-f694-4f09-8836-88f0b70ac943" containerName="extract-utilities"
Jan 28 17:45:00 crc kubenswrapper[4877]: E0128 17:45:00.256671 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8291c5ff-f694-4f09-8836-88f0b70ac943" containerName="extract-content"
Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.256681 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="8291c5ff-f694-4f09-8836-88f0b70ac943" containerName="extract-content"
Jan 28 17:45:00 crc kubenswrapper[4877]: E0128 17:45:00.256748 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8291c5ff-f694-4f09-8836-88f0b70ac943" containerName="registry-server"
Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.256760 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="8291c5ff-f694-4f09-8836-88f0b70ac943" containerName="registry-server"
Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.257075 4877 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="8291c5ff-f694-4f09-8836-88f0b70ac943" containerName="registry-server" Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.258555 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.263243 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.263574 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.273644 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br"] Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.395292 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7v9v\" (UniqueName: \"kubernetes.io/projected/a004cbc7-cb0a-479f-9b13-a8a70ccff069-kube-api-access-q7v9v\") pod \"collect-profiles-29493705-fn7br\" (UID: \"a004cbc7-cb0a-479f-9b13-a8a70ccff069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.395682 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a004cbc7-cb0a-479f-9b13-a8a70ccff069-config-volume\") pod \"collect-profiles-29493705-fn7br\" (UID: \"a004cbc7-cb0a-479f-9b13-a8a70ccff069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.396361 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a004cbc7-cb0a-479f-9b13-a8a70ccff069-secret-volume\") pod \"collect-profiles-29493705-fn7br\" (UID: \"a004cbc7-cb0a-479f-9b13-a8a70ccff069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.499014 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7v9v\" (UniqueName: \"kubernetes.io/projected/a004cbc7-cb0a-479f-9b13-a8a70ccff069-kube-api-access-q7v9v\") pod \"collect-profiles-29493705-fn7br\" (UID: \"a004cbc7-cb0a-479f-9b13-a8a70ccff069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.499282 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a004cbc7-cb0a-479f-9b13-a8a70ccff069-config-volume\") pod \"collect-profiles-29493705-fn7br\" (UID: \"a004cbc7-cb0a-479f-9b13-a8a70ccff069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.499467 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a004cbc7-cb0a-479f-9b13-a8a70ccff069-secret-volume\") pod \"collect-profiles-29493705-fn7br\" (UID: \"a004cbc7-cb0a-479f-9b13-a8a70ccff069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.500269 
4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a004cbc7-cb0a-479f-9b13-a8a70ccff069-config-volume\") pod \"collect-profiles-29493705-fn7br\" (UID: \"a004cbc7-cb0a-479f-9b13-a8a70ccff069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.506706 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a004cbc7-cb0a-479f-9b13-a8a70ccff069-secret-volume\") pod \"collect-profiles-29493705-fn7br\" (UID: \"a004cbc7-cb0a-479f-9b13-a8a70ccff069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.518440 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7v9v\" (UniqueName: \"kubernetes.io/projected/a004cbc7-cb0a-479f-9b13-a8a70ccff069-kube-api-access-q7v9v\") pod \"collect-profiles-29493705-fn7br\" (UID: \"a004cbc7-cb0a-479f-9b13-a8a70ccff069\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" Jan 28 17:45:00 crc kubenswrapper[4877]: I0128 17:45:00.595922 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" Jan 28 17:45:01 crc kubenswrapper[4877]: I0128 17:45:01.185290 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br"] Jan 28 17:45:01 crc kubenswrapper[4877]: I0128 17:45:01.946010 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" event={"ID":"a004cbc7-cb0a-479f-9b13-a8a70ccff069","Type":"ContainerStarted","Data":"92871288ca1d99224deaa9db050c6bc58d0c645ca8cc9cf648d29b9d2f51a82f"} Jan 28 17:45:01 crc kubenswrapper[4877]: I0128 17:45:01.946712 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" event={"ID":"a004cbc7-cb0a-479f-9b13-a8a70ccff069","Type":"ContainerStarted","Data":"f7ec3b8d16884ff5f08efbb7fe700d3f3ae89e8538f8bd51a1fa503f47a60b31"} Jan 28 17:45:01 crc kubenswrapper[4877]: I0128 17:45:01.992020 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" podStartSLOduration=1.991997306 podStartE2EDuration="1.991997306s" podCreationTimestamp="2026-01-28 17:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 17:45:01.974913664 +0000 UTC m=+4205.533240552" watchObservedRunningTime="2026-01-28 17:45:01.991997306 +0000 UTC m=+4205.550324194" Jan 28 17:45:02 crc kubenswrapper[4877]: I0128 17:45:02.956174 4877 generic.go:334] "Generic (PLEG): container finished" podID="a004cbc7-cb0a-479f-9b13-a8a70ccff069" containerID="92871288ca1d99224deaa9db050c6bc58d0c645ca8cc9cf648d29b9d2f51a82f" exitCode=0 Jan 28 17:45:02 crc kubenswrapper[4877]: I0128 17:45:02.956303 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" event={"ID":"a004cbc7-cb0a-479f-9b13-a8a70ccff069","Type":"ContainerDied","Data":"92871288ca1d99224deaa9db050c6bc58d0c645ca8cc9cf648d29b9d2f51a82f"} Jan 28 17:45:04 crc kubenswrapper[4877]: I0128 17:45:04.813486 4877 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" Jan 28 17:45:04 crc kubenswrapper[4877]: I0128 17:45:04.931314 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a004cbc7-cb0a-479f-9b13-a8a70ccff069-config-volume\") pod \"a004cbc7-cb0a-479f-9b13-a8a70ccff069\" (UID: \"a004cbc7-cb0a-479f-9b13-a8a70ccff069\") " Jan 28 17:45:04 crc kubenswrapper[4877]: I0128 17:45:04.931558 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a004cbc7-cb0a-479f-9b13-a8a70ccff069-secret-volume\") pod \"a004cbc7-cb0a-479f-9b13-a8a70ccff069\" (UID: \"a004cbc7-cb0a-479f-9b13-a8a70ccff069\") " Jan 28 17:45:04 crc kubenswrapper[4877]: I0128 17:45:04.931591 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7v9v\" (UniqueName: \"kubernetes.io/projected/a004cbc7-cb0a-479f-9b13-a8a70ccff069-kube-api-access-q7v9v\") pod \"a004cbc7-cb0a-479f-9b13-a8a70ccff069\" (UID: \"a004cbc7-cb0a-479f-9b13-a8a70ccff069\") " Jan 28 17:45:04 crc kubenswrapper[4877]: I0128 17:45:04.932158 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a004cbc7-cb0a-479f-9b13-a8a70ccff069-config-volume" (OuterVolumeSpecName: "config-volume") pod "a004cbc7-cb0a-479f-9b13-a8a70ccff069" (UID: "a004cbc7-cb0a-479f-9b13-a8a70ccff069"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 17:45:04 crc kubenswrapper[4877]: I0128 17:45:04.939070 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a004cbc7-cb0a-479f-9b13-a8a70ccff069-kube-api-access-q7v9v" (OuterVolumeSpecName: "kube-api-access-q7v9v") pod "a004cbc7-cb0a-479f-9b13-a8a70ccff069" (UID: "a004cbc7-cb0a-479f-9b13-a8a70ccff069"). InnerVolumeSpecName "kube-api-access-q7v9v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:45:04 crc kubenswrapper[4877]: I0128 17:45:04.941704 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a004cbc7-cb0a-479f-9b13-a8a70ccff069-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a004cbc7-cb0a-479f-9b13-a8a70ccff069" (UID: "a004cbc7-cb0a-479f-9b13-a8a70ccff069"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 17:45:04 crc kubenswrapper[4877]: I0128 17:45:04.983297 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" Jan 28 17:45:04 crc kubenswrapper[4877]: I0128 17:45:04.984651 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493705-fn7br" event={"ID":"a004cbc7-cb0a-479f-9b13-a8a70ccff069","Type":"ContainerDied","Data":"f7ec3b8d16884ff5f08efbb7fe700d3f3ae89e8538f8bd51a1fa503f47a60b31"} Jan 28 17:45:04 crc kubenswrapper[4877]: I0128 17:45:04.984743 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7ec3b8d16884ff5f08efbb7fe700d3f3ae89e8538f8bd51a1fa503f47a60b31" Jan 28 17:45:05 crc kubenswrapper[4877]: I0128 17:45:05.034147 4877 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a004cbc7-cb0a-479f-9b13-a8a70ccff069-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 17:45:05 crc kubenswrapper[4877]: I0128 17:45:05.034182 4877 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a004cbc7-cb0a-479f-9b13-a8a70ccff069-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 17:45:05 crc kubenswrapper[4877]: I0128 17:45:05.034193 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7v9v\" (UniqueName: \"kubernetes.io/projected/a004cbc7-cb0a-479f-9b13-a8a70ccff069-kube-api-access-q7v9v\") on node \"crc\" DevicePath \"\"" Jan 28 17:45:05 crc kubenswrapper[4877]: I0128 17:45:05.910132 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9"] Jan 28 17:45:05 crc kubenswrapper[4877]: I0128 17:45:05.927068 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493660-wbkv9"] Jan 28 17:45:07 crc kubenswrapper[4877]: I0128 17:45:07.349274 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54351f81-b326-424d-8061-5108152ce046" path="/var/lib/kubelet/pods/54351f81-b326-424d-8061-5108152ce046/volumes" Jan 28 17:45:16 crc kubenswrapper[4877]: I0128 17:45:16.349112 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qc2lp"] Jan 28 17:45:16 crc kubenswrapper[4877]: E0128 17:45:16.350825 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a004cbc7-cb0a-479f-9b13-a8a70ccff069" containerName="collect-profiles" Jan 28 17:45:16 crc kubenswrapper[4877]: I0128 17:45:16.350846 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="a004cbc7-cb0a-479f-9b13-a8a70ccff069" containerName="collect-profiles" Jan 28 17:45:16 crc kubenswrapper[4877]: I0128 17:45:16.351267 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="a004cbc7-cb0a-479f-9b13-a8a70ccff069" containerName="collect-profiles" Jan 28 17:45:16 crc kubenswrapper[4877]: I0128 17:45:16.358949 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:16 crc kubenswrapper[4877]: I0128 17:45:16.367284 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qc2lp"] Jan 28 17:45:16 crc kubenswrapper[4877]: I0128 17:45:16.446679 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-utilities\") pod \"certified-operators-qc2lp\" (UID: \"76fd644b-7eb3-4f91-9814-91f65b4fcc2c\") " pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:16 crc kubenswrapper[4877]: I0128 17:45:16.446798 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-catalog-content\") pod \"certified-operators-qc2lp\" (UID: \"76fd644b-7eb3-4f91-9814-91f65b4fcc2c\") " pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:16 crc kubenswrapper[4877]: I0128 17:45:16.446830 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5nkp\" (UniqueName: \"kubernetes.io/projected/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-kube-api-access-n5nkp\") pod \"certified-operators-qc2lp\" (UID: \"76fd644b-7eb3-4f91-9814-91f65b4fcc2c\") " pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:16 crc kubenswrapper[4877]: I0128 17:45:16.548924 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-catalog-content\") pod \"certified-operators-qc2lp\" (UID: \"76fd644b-7eb3-4f91-9814-91f65b4fcc2c\") " pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:16 crc kubenswrapper[4877]: I0128 17:45:16.548996 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5nkp\" (UniqueName: \"kubernetes.io/projected/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-kube-api-access-n5nkp\") pod \"certified-operators-qc2lp\" (UID: \"76fd644b-7eb3-4f91-9814-91f65b4fcc2c\") " pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:16 crc kubenswrapper[4877]: I0128 17:45:16.549209 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-utilities\") pod \"certified-operators-qc2lp\" (UID: \"76fd644b-7eb3-4f91-9814-91f65b4fcc2c\") " pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:16 crc kubenswrapper[4877]: I0128 17:45:16.549413 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-catalog-content\") pod \"certified-operators-qc2lp\" (UID: \"76fd644b-7eb3-4f91-9814-91f65b4fcc2c\") " pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:16 crc kubenswrapper[4877]: I0128 17:45:16.549683 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-utilities\") pod \"certified-operators-qc2lp\" (UID: \"76fd644b-7eb3-4f91-9814-91f65b4fcc2c\") " pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:16 crc kubenswrapper[4877]: I0128 17:45:16.856518 4877 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-n5nkp\" (UniqueName: \"kubernetes.io/projected/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-kube-api-access-n5nkp\") pod \"certified-operators-qc2lp\" (UID: \"76fd644b-7eb3-4f91-9814-91f65b4fcc2c\") " pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:16 crc kubenswrapper[4877]: I0128 17:45:16.994218 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:17 crc kubenswrapper[4877]: I0128 17:45:17.556266 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qc2lp"] Jan 28 17:45:18 crc kubenswrapper[4877]: I0128 17:45:18.168847 4877 generic.go:334] "Generic (PLEG): container finished" podID="76fd644b-7eb3-4f91-9814-91f65b4fcc2c" containerID="cefe7e7d44cb78126f34840160b0b000a77a915efbfc9a18a206d17939e8b28e" exitCode=0 Jan 28 17:45:18 crc kubenswrapper[4877]: I0128 17:45:18.169169 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qc2lp" event={"ID":"76fd644b-7eb3-4f91-9814-91f65b4fcc2c","Type":"ContainerDied","Data":"cefe7e7d44cb78126f34840160b0b000a77a915efbfc9a18a206d17939e8b28e"} Jan 28 17:45:18 crc kubenswrapper[4877]: I0128 17:45:18.169221 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qc2lp" event={"ID":"76fd644b-7eb3-4f91-9814-91f65b4fcc2c","Type":"ContainerStarted","Data":"87d1b5fe78d9ba63148d54395ffda8be9e36b27d035747e0a1a5035aa9bc2f0c"} Jan 28 17:45:20 crc kubenswrapper[4877]: I0128 17:45:20.193086 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qc2lp" event={"ID":"76fd644b-7eb3-4f91-9814-91f65b4fcc2c","Type":"ContainerStarted","Data":"9edc7229e6b7e43cab3edd182349fe0ad113a2ca84c3ca2e037f9d32e91a112d"} Jan 28 17:45:21 crc kubenswrapper[4877]: I0128 17:45:21.204152 4877 generic.go:334] "Generic (PLEG): container finished" podID="76fd644b-7eb3-4f91-9814-91f65b4fcc2c" containerID="9edc7229e6b7e43cab3edd182349fe0ad113a2ca84c3ca2e037f9d32e91a112d" exitCode=0 Jan 28 17:45:21 crc kubenswrapper[4877]: I0128 17:45:21.204304 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qc2lp" event={"ID":"76fd644b-7eb3-4f91-9814-91f65b4fcc2c","Type":"ContainerDied","Data":"9edc7229e6b7e43cab3edd182349fe0ad113a2ca84c3ca2e037f9d32e91a112d"} Jan 28 17:45:22 crc kubenswrapper[4877]: I0128 17:45:22.217224 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qc2lp" event={"ID":"76fd644b-7eb3-4f91-9814-91f65b4fcc2c","Type":"ContainerStarted","Data":"780c96ff9cac578c85eeb31aefc092d0e993c667aebddfc3420fc400b5b7335b"} Jan 28 17:45:22 crc kubenswrapper[4877]: I0128 17:45:22.248793 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qc2lp" podStartSLOduration=2.7923021180000003 podStartE2EDuration="6.248772869s" podCreationTimestamp="2026-01-28 17:45:16 +0000 UTC" firstStartedPulling="2026-01-28 17:45:18.17129668 +0000 UTC m=+4221.729623568" lastFinishedPulling="2026-01-28 17:45:21.627767411 +0000 UTC m=+4225.186094319" observedRunningTime="2026-01-28 17:45:22.241034709 +0000 UTC m=+4225.799361607" watchObservedRunningTime="2026-01-28 17:45:22.248772869 +0000 UTC m=+4225.807099757" Jan 28 17:45:26 crc kubenswrapper[4877]: I0128 17:45:26.994959 4877 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:26 crc kubenswrapper[4877]: I0128 17:45:26.995559 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:27 crc kubenswrapper[4877]: I0128 17:45:27.056093 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:27 crc kubenswrapper[4877]: I0128 17:45:27.348056 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:27 crc kubenswrapper[4877]: I0128 17:45:27.412505 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qc2lp"] Jan 28 17:45:29 crc kubenswrapper[4877]: I0128 17:45:29.304556 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qc2lp" podUID="76fd644b-7eb3-4f91-9814-91f65b4fcc2c" containerName="registry-server" containerID="cri-o://780c96ff9cac578c85eeb31aefc092d0e993c667aebddfc3420fc400b5b7335b" gracePeriod=2 Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.132021 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.294775 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-utilities\") pod \"76fd644b-7eb3-4f91-9814-91f65b4fcc2c\" (UID: \"76fd644b-7eb3-4f91-9814-91f65b4fcc2c\") " Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.294839 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n5nkp\" (UniqueName: \"kubernetes.io/projected/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-kube-api-access-n5nkp\") pod \"76fd644b-7eb3-4f91-9814-91f65b4fcc2c\" (UID: \"76fd644b-7eb3-4f91-9814-91f65b4fcc2c\") " Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.294984 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-catalog-content\") pod \"76fd644b-7eb3-4f91-9814-91f65b4fcc2c\" (UID: \"76fd644b-7eb3-4f91-9814-91f65b4fcc2c\") " Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.295860 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-utilities" (OuterVolumeSpecName: "utilities") pod "76fd644b-7eb3-4f91-9814-91f65b4fcc2c" (UID: "76fd644b-7eb3-4f91-9814-91f65b4fcc2c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.300599 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-kube-api-access-n5nkp" (OuterVolumeSpecName: "kube-api-access-n5nkp") pod "76fd644b-7eb3-4f91-9814-91f65b4fcc2c" (UID: "76fd644b-7eb3-4f91-9814-91f65b4fcc2c"). InnerVolumeSpecName "kube-api-access-n5nkp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.323145 4877 generic.go:334] "Generic (PLEG): container finished" podID="76fd644b-7eb3-4f91-9814-91f65b4fcc2c" containerID="780c96ff9cac578c85eeb31aefc092d0e993c667aebddfc3420fc400b5b7335b" exitCode=0 Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.323231 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qc2lp" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.323216 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qc2lp" event={"ID":"76fd644b-7eb3-4f91-9814-91f65b4fcc2c","Type":"ContainerDied","Data":"780c96ff9cac578c85eeb31aefc092d0e993c667aebddfc3420fc400b5b7335b"} Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.323436 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qc2lp" event={"ID":"76fd644b-7eb3-4f91-9814-91f65b4fcc2c","Type":"ContainerDied","Data":"87d1b5fe78d9ba63148d54395ffda8be9e36b27d035747e0a1a5035aa9bc2f0c"} Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.323462 4877 scope.go:117] "RemoveContainer" containerID="780c96ff9cac578c85eeb31aefc092d0e993c667aebddfc3420fc400b5b7335b" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.378078 4877 scope.go:117] "RemoveContainer" containerID="9edc7229e6b7e43cab3edd182349fe0ad113a2ca84c3ca2e037f9d32e91a112d" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.398401 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.398444 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n5nkp\" (UniqueName: \"kubernetes.io/projected/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-kube-api-access-n5nkp\") on node \"crc\" DevicePath \"\"" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.404135 4877 scope.go:117] "RemoveContainer" containerID="cefe7e7d44cb78126f34840160b0b000a77a915efbfc9a18a206d17939e8b28e" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.468358 4877 scope.go:117] "RemoveContainer" containerID="780c96ff9cac578c85eeb31aefc092d0e993c667aebddfc3420fc400b5b7335b" Jan 28 17:45:30 crc kubenswrapper[4877]: E0128 17:45:30.468831 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"780c96ff9cac578c85eeb31aefc092d0e993c667aebddfc3420fc400b5b7335b\": container with ID starting with 780c96ff9cac578c85eeb31aefc092d0e993c667aebddfc3420fc400b5b7335b not found: ID does not exist" containerID="780c96ff9cac578c85eeb31aefc092d0e993c667aebddfc3420fc400b5b7335b" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.468874 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"780c96ff9cac578c85eeb31aefc092d0e993c667aebddfc3420fc400b5b7335b"} err="failed to get container status \"780c96ff9cac578c85eeb31aefc092d0e993c667aebddfc3420fc400b5b7335b\": rpc error: code = NotFound desc = could not find container \"780c96ff9cac578c85eeb31aefc092d0e993c667aebddfc3420fc400b5b7335b\": container with ID starting with 780c96ff9cac578c85eeb31aefc092d0e993c667aebddfc3420fc400b5b7335b not found: ID does not exist" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.468899 4877 scope.go:117] 
"RemoveContainer" containerID="9edc7229e6b7e43cab3edd182349fe0ad113a2ca84c3ca2e037f9d32e91a112d" Jan 28 17:45:30 crc kubenswrapper[4877]: E0128 17:45:30.469323 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9edc7229e6b7e43cab3edd182349fe0ad113a2ca84c3ca2e037f9d32e91a112d\": container with ID starting with 9edc7229e6b7e43cab3edd182349fe0ad113a2ca84c3ca2e037f9d32e91a112d not found: ID does not exist" containerID="9edc7229e6b7e43cab3edd182349fe0ad113a2ca84c3ca2e037f9d32e91a112d" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.469362 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9edc7229e6b7e43cab3edd182349fe0ad113a2ca84c3ca2e037f9d32e91a112d"} err="failed to get container status \"9edc7229e6b7e43cab3edd182349fe0ad113a2ca84c3ca2e037f9d32e91a112d\": rpc error: code = NotFound desc = could not find container \"9edc7229e6b7e43cab3edd182349fe0ad113a2ca84c3ca2e037f9d32e91a112d\": container with ID starting with 9edc7229e6b7e43cab3edd182349fe0ad113a2ca84c3ca2e037f9d32e91a112d not found: ID does not exist" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.469381 4877 scope.go:117] "RemoveContainer" containerID="cefe7e7d44cb78126f34840160b0b000a77a915efbfc9a18a206d17939e8b28e" Jan 28 17:45:30 crc kubenswrapper[4877]: E0128 17:45:30.469683 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cefe7e7d44cb78126f34840160b0b000a77a915efbfc9a18a206d17939e8b28e\": container with ID starting with cefe7e7d44cb78126f34840160b0b000a77a915efbfc9a18a206d17939e8b28e not found: ID does not exist" containerID="cefe7e7d44cb78126f34840160b0b000a77a915efbfc9a18a206d17939e8b28e" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.469726 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cefe7e7d44cb78126f34840160b0b000a77a915efbfc9a18a206d17939e8b28e"} err="failed to get container status \"cefe7e7d44cb78126f34840160b0b000a77a915efbfc9a18a206d17939e8b28e\": rpc error: code = NotFound desc = could not find container \"cefe7e7d44cb78126f34840160b0b000a77a915efbfc9a18a206d17939e8b28e\": container with ID starting with cefe7e7d44cb78126f34840160b0b000a77a915efbfc9a18a206d17939e8b28e not found: ID does not exist" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.798056 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "76fd644b-7eb3-4f91-9814-91f65b4fcc2c" (UID: "76fd644b-7eb3-4f91-9814-91f65b4fcc2c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.812743 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76fd644b-7eb3-4f91-9814-91f65b4fcc2c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.974284 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qc2lp"] Jan 28 17:45:30 crc kubenswrapper[4877]: I0128 17:45:30.989373 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qc2lp"] Jan 28 17:45:31 crc kubenswrapper[4877]: I0128 17:45:31.346663 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76fd644b-7eb3-4f91-9814-91f65b4fcc2c" path="/var/lib/kubelet/pods/76fd644b-7eb3-4f91-9814-91f65b4fcc2c/volumes" Jan 28 17:45:31 crc kubenswrapper[4877]: I0128 17:45:31.358791 4877 scope.go:117] "RemoveContainer" containerID="3c996a19f468a70994d82ee768412f94d27a7d28bcbfd15dbbef65ec0eee11ed" Jan 28 17:46:07 crc kubenswrapper[4877]: I0128 17:46:07.077003 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:46:07 crc kubenswrapper[4877]: I0128 17:46:07.077444 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:46:37 crc kubenswrapper[4877]: I0128 17:46:37.075962 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:46:37 crc kubenswrapper[4877]: I0128 17:46:37.076565 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:47:07 crc kubenswrapper[4877]: I0128 17:47:07.076618 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:47:07 crc kubenswrapper[4877]: I0128 17:47:07.077206 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:47:07 crc kubenswrapper[4877]: I0128 17:47:07.077263 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 
28 17:47:07 crc kubenswrapper[4877]: I0128 17:47:07.078300 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 17:47:07 crc kubenswrapper[4877]: I0128 17:47:07.078374 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" containerID="cri-o://ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b" gracePeriod=600 Jan 28 17:47:07 crc kubenswrapper[4877]: E0128 17:47:07.209507 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:47:07 crc kubenswrapper[4877]: I0128 17:47:07.385673 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b" exitCode=0 Jan 28 17:47:07 crc kubenswrapper[4877]: I0128 17:47:07.385736 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b"} Jan 28 17:47:07 crc kubenswrapper[4877]: I0128 17:47:07.385776 4877 scope.go:117] "RemoveContainer" containerID="637c7a46fd451240eeb80bc0e7638e276252f9e4d1c404e0e56502e717e91fdb" Jan 28 17:47:07 crc kubenswrapper[4877]: I0128 17:47:07.386542 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b" Jan 28 17:47:07 crc kubenswrapper[4877]: E0128 17:47:07.386839 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:47:10 crc kubenswrapper[4877]: I0128 17:47:10.750860 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kh7dp"] Jan 28 17:47:10 crc kubenswrapper[4877]: E0128 17:47:10.751782 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76fd644b-7eb3-4f91-9814-91f65b4fcc2c" containerName="registry-server" Jan 28 17:47:10 crc kubenswrapper[4877]: I0128 17:47:10.751856 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="76fd644b-7eb3-4f91-9814-91f65b4fcc2c" containerName="registry-server" Jan 28 17:47:10 crc kubenswrapper[4877]: E0128 17:47:10.751875 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76fd644b-7eb3-4f91-9814-91f65b4fcc2c" containerName="extract-utilities" Jan 28 17:47:10 crc 
kubenswrapper[4877]: I0128 17:47:10.751882 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="76fd644b-7eb3-4f91-9814-91f65b4fcc2c" containerName="extract-utilities" Jan 28 17:47:10 crc kubenswrapper[4877]: E0128 17:47:10.751928 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76fd644b-7eb3-4f91-9814-91f65b4fcc2c" containerName="extract-content" Jan 28 17:47:10 crc kubenswrapper[4877]: I0128 17:47:10.751935 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="76fd644b-7eb3-4f91-9814-91f65b4fcc2c" containerName="extract-content" Jan 28 17:47:10 crc kubenswrapper[4877]: I0128 17:47:10.752153 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="76fd644b-7eb3-4f91-9814-91f65b4fcc2c" containerName="registry-server" Jan 28 17:47:10 crc kubenswrapper[4877]: I0128 17:47:10.754097 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kh7dp" Jan 28 17:47:10 crc kubenswrapper[4877]: I0128 17:47:10.767802 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kh7dp"] Jan 28 17:47:10 crc kubenswrapper[4877]: I0128 17:47:10.838269 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6220c90b-28e3-4bed-a6bf-7cccff4e2877-catalog-content\") pod \"community-operators-kh7dp\" (UID: \"6220c90b-28e3-4bed-a6bf-7cccff4e2877\") " pod="openshift-marketplace/community-operators-kh7dp" Jan 28 17:47:10 crc kubenswrapper[4877]: I0128 17:47:10.838316 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6220c90b-28e3-4bed-a6bf-7cccff4e2877-utilities\") pod \"community-operators-kh7dp\" (UID: \"6220c90b-28e3-4bed-a6bf-7cccff4e2877\") " pod="openshift-marketplace/community-operators-kh7dp" Jan 28 17:47:10 crc kubenswrapper[4877]: I0128 17:47:10.838499 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b24t8\" (UniqueName: \"kubernetes.io/projected/6220c90b-28e3-4bed-a6bf-7cccff4e2877-kube-api-access-b24t8\") pod \"community-operators-kh7dp\" (UID: \"6220c90b-28e3-4bed-a6bf-7cccff4e2877\") " pod="openshift-marketplace/community-operators-kh7dp" Jan 28 17:47:10 crc kubenswrapper[4877]: I0128 17:47:10.942391 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6220c90b-28e3-4bed-a6bf-7cccff4e2877-catalog-content\") pod \"community-operators-kh7dp\" (UID: \"6220c90b-28e3-4bed-a6bf-7cccff4e2877\") " pod="openshift-marketplace/community-operators-kh7dp" Jan 28 17:47:10 crc kubenswrapper[4877]: I0128 17:47:10.942453 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6220c90b-28e3-4bed-a6bf-7cccff4e2877-utilities\") pod \"community-operators-kh7dp\" (UID: \"6220c90b-28e3-4bed-a6bf-7cccff4e2877\") " pod="openshift-marketplace/community-operators-kh7dp" Jan 28 17:47:10 crc kubenswrapper[4877]: I0128 17:47:10.942604 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b24t8\" (UniqueName: \"kubernetes.io/projected/6220c90b-28e3-4bed-a6bf-7cccff4e2877-kube-api-access-b24t8\") pod \"community-operators-kh7dp\" (UID: \"6220c90b-28e3-4bed-a6bf-7cccff4e2877\") " 
pod="openshift-marketplace/community-operators-kh7dp" Jan 28 17:47:10 crc kubenswrapper[4877]: I0128 17:47:10.943148 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6220c90b-28e3-4bed-a6bf-7cccff4e2877-catalog-content\") pod \"community-operators-kh7dp\" (UID: \"6220c90b-28e3-4bed-a6bf-7cccff4e2877\") " pod="openshift-marketplace/community-operators-kh7dp" Jan 28 17:47:10 crc kubenswrapper[4877]: I0128 17:47:10.943507 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6220c90b-28e3-4bed-a6bf-7cccff4e2877-utilities\") pod \"community-operators-kh7dp\" (UID: \"6220c90b-28e3-4bed-a6bf-7cccff4e2877\") " pod="openshift-marketplace/community-operators-kh7dp" Jan 28 17:47:10 crc kubenswrapper[4877]: I0128 17:47:10.975575 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b24t8\" (UniqueName: \"kubernetes.io/projected/6220c90b-28e3-4bed-a6bf-7cccff4e2877-kube-api-access-b24t8\") pod \"community-operators-kh7dp\" (UID: \"6220c90b-28e3-4bed-a6bf-7cccff4e2877\") " pod="openshift-marketplace/community-operators-kh7dp" Jan 28 17:47:11 crc kubenswrapper[4877]: I0128 17:47:11.090363 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kh7dp" Jan 28 17:47:11 crc kubenswrapper[4877]: I0128 17:47:11.662796 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kh7dp"] Jan 28 17:47:12 crc kubenswrapper[4877]: I0128 17:47:12.447332 4877 generic.go:334] "Generic (PLEG): container finished" podID="6220c90b-28e3-4bed-a6bf-7cccff4e2877" containerID="7a49e5544ec8d67fe6f42164fc6e4ae195dec330c530c3312070ccc9200e4556" exitCode=0 Jan 28 17:47:12 crc kubenswrapper[4877]: I0128 17:47:12.447460 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kh7dp" event={"ID":"6220c90b-28e3-4bed-a6bf-7cccff4e2877","Type":"ContainerDied","Data":"7a49e5544ec8d67fe6f42164fc6e4ae195dec330c530c3312070ccc9200e4556"} Jan 28 17:47:12 crc kubenswrapper[4877]: I0128 17:47:12.447963 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kh7dp" event={"ID":"6220c90b-28e3-4bed-a6bf-7cccff4e2877","Type":"ContainerStarted","Data":"cdee1105a5a5229d415d9629fdd83e5b3cb681cbdc1d0be8c049ec26067c6815"} Jan 28 17:47:12 crc kubenswrapper[4877]: I0128 17:47:12.451376 4877 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 17:47:13 crc kubenswrapper[4877]: I0128 17:47:13.462708 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kh7dp" event={"ID":"6220c90b-28e3-4bed-a6bf-7cccff4e2877","Type":"ContainerStarted","Data":"e0d79d4fa487badeab8a1b15654a1cbc3d4862acbd7af2d8607bea25b3a5c094"} Jan 28 17:47:14 crc kubenswrapper[4877]: I0128 17:47:14.478737 4877 generic.go:334] "Generic (PLEG): container finished" podID="6220c90b-28e3-4bed-a6bf-7cccff4e2877" containerID="e0d79d4fa487badeab8a1b15654a1cbc3d4862acbd7af2d8607bea25b3a5c094" exitCode=0 Jan 28 17:47:14 crc kubenswrapper[4877]: I0128 17:47:14.478803 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kh7dp" event={"ID":"6220c90b-28e3-4bed-a6bf-7cccff4e2877","Type":"ContainerDied","Data":"e0d79d4fa487badeab8a1b15654a1cbc3d4862acbd7af2d8607bea25b3a5c094"} 
Jan 28 17:47:15 crc kubenswrapper[4877]: I0128 17:47:15.491762 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kh7dp" event={"ID":"6220c90b-28e3-4bed-a6bf-7cccff4e2877","Type":"ContainerStarted","Data":"0a8c86d30134d287b8b88c866a8edf5b551de028862882f5b66c9088a7bae59d"}
Jan 28 17:47:15 crc kubenswrapper[4877]: I0128 17:47:15.519499 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kh7dp" podStartSLOduration=3.07295446 podStartE2EDuration="5.519464446s" podCreationTimestamp="2026-01-28 17:47:10 +0000 UTC" firstStartedPulling="2026-01-28 17:47:12.450324273 +0000 UTC m=+4336.008651161" lastFinishedPulling="2026-01-28 17:47:14.896834259 +0000 UTC m=+4338.455161147" observedRunningTime="2026-01-28 17:47:15.514981145 +0000 UTC m=+4339.073308043" watchObservedRunningTime="2026-01-28 17:47:15.519464446 +0000 UTC m=+4339.077791324"
Jan 28 17:47:21 crc kubenswrapper[4877]: I0128 17:47:21.092774 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kh7dp"
Jan 28 17:47:21 crc kubenswrapper[4877]: I0128 17:47:21.093222 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kh7dp"
Jan 28 17:47:21 crc kubenswrapper[4877]: I0128 17:47:21.152607 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kh7dp"
Jan 28 17:47:21 crc kubenswrapper[4877]: I0128 17:47:21.638218 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kh7dp"
Jan 28 17:47:21 crc kubenswrapper[4877]: I0128 17:47:21.709244 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kh7dp"]
Jan 28 17:47:22 crc kubenswrapper[4877]: I0128 17:47:22.332296 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b"
Jan 28 17:47:22 crc kubenswrapper[4877]: E0128 17:47:22.332913 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:47:23 crc kubenswrapper[4877]: I0128 17:47:23.599820 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-kh7dp" podUID="6220c90b-28e3-4bed-a6bf-7cccff4e2877" containerName="registry-server" containerID="cri-o://0a8c86d30134d287b8b88c866a8edf5b551de028862882f5b66c9088a7bae59d" gracePeriod=2
Jan 28 17:47:24 crc kubenswrapper[4877]: I0128 17:47:24.612530 4877 generic.go:334] "Generic (PLEG): container finished" podID="6220c90b-28e3-4bed-a6bf-7cccff4e2877" containerID="0a8c86d30134d287b8b88c866a8edf5b551de028862882f5b66c9088a7bae59d" exitCode=0
Jan 28 17:47:24 crc kubenswrapper[4877]: I0128 17:47:24.612609 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kh7dp" event={"ID":"6220c90b-28e3-4bed-a6bf-7cccff4e2877","Type":"ContainerDied","Data":"0a8c86d30134d287b8b88c866a8edf5b551de028862882f5b66c9088a7bae59d"}
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.406300 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kh7dp"
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.451810 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6220c90b-28e3-4bed-a6bf-7cccff4e2877-catalog-content\") pod \"6220c90b-28e3-4bed-a6bf-7cccff4e2877\" (UID: \"6220c90b-28e3-4bed-a6bf-7cccff4e2877\") "
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.453460 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b24t8\" (UniqueName: \"kubernetes.io/projected/6220c90b-28e3-4bed-a6bf-7cccff4e2877-kube-api-access-b24t8\") pod \"6220c90b-28e3-4bed-a6bf-7cccff4e2877\" (UID: \"6220c90b-28e3-4bed-a6bf-7cccff4e2877\") "
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.453612 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6220c90b-28e3-4bed-a6bf-7cccff4e2877-utilities\") pod \"6220c90b-28e3-4bed-a6bf-7cccff4e2877\" (UID: \"6220c90b-28e3-4bed-a6bf-7cccff4e2877\") "
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.456400 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6220c90b-28e3-4bed-a6bf-7cccff4e2877-utilities" (OuterVolumeSpecName: "utilities") pod "6220c90b-28e3-4bed-a6bf-7cccff4e2877" (UID: "6220c90b-28e3-4bed-a6bf-7cccff4e2877"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.462205 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6220c90b-28e3-4bed-a6bf-7cccff4e2877-kube-api-access-b24t8" (OuterVolumeSpecName: "kube-api-access-b24t8") pod "6220c90b-28e3-4bed-a6bf-7cccff4e2877" (UID: "6220c90b-28e3-4bed-a6bf-7cccff4e2877"). InnerVolumeSpecName "kube-api-access-b24t8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.513209 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6220c90b-28e3-4bed-a6bf-7cccff4e2877-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6220c90b-28e3-4bed-a6bf-7cccff4e2877" (UID: "6220c90b-28e3-4bed-a6bf-7cccff4e2877"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.558180 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6220c90b-28e3-4bed-a6bf-7cccff4e2877-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.558213 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b24t8\" (UniqueName: \"kubernetes.io/projected/6220c90b-28e3-4bed-a6bf-7cccff4e2877-kube-api-access-b24t8\") on node \"crc\" DevicePath \"\""
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.558223 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6220c90b-28e3-4bed-a6bf-7cccff4e2877-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.628436 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kh7dp" event={"ID":"6220c90b-28e3-4bed-a6bf-7cccff4e2877","Type":"ContainerDied","Data":"cdee1105a5a5229d415d9629fdd83e5b3cb681cbdc1d0be8c049ec26067c6815"}
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.628540 4877 scope.go:117] "RemoveContainer" containerID="0a8c86d30134d287b8b88c866a8edf5b551de028862882f5b66c9088a7bae59d"
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.628593 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kh7dp"
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.662660 4877 scope.go:117] "RemoveContainer" containerID="e0d79d4fa487badeab8a1b15654a1cbc3d4862acbd7af2d8607bea25b3a5c094"
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.673303 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kh7dp"]
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.686035 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-kh7dp"]
Jan 28 17:47:25 crc kubenswrapper[4877]: I0128 17:47:25.722729 4877 scope.go:117] "RemoveContainer" containerID="7a49e5544ec8d67fe6f42164fc6e4ae195dec330c530c3312070ccc9200e4556"
Jan 28 17:47:27 crc kubenswrapper[4877]: I0128 17:47:27.365250 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6220c90b-28e3-4bed-a6bf-7cccff4e2877" path="/var/lib/kubelet/pods/6220c90b-28e3-4bed-a6bf-7cccff4e2877/volumes"
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.280997 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mpkl4"]
Jan 28 17:47:31 crc kubenswrapper[4877]: E0128 17:47:31.296217 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6220c90b-28e3-4bed-a6bf-7cccff4e2877" containerName="extract-content"
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.296486 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="6220c90b-28e3-4bed-a6bf-7cccff4e2877" containerName="extract-content"
Jan 28 17:47:31 crc kubenswrapper[4877]: E0128 17:47:31.296571 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6220c90b-28e3-4bed-a6bf-7cccff4e2877" containerName="extract-utilities"
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.296625 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="6220c90b-28e3-4bed-a6bf-7cccff4e2877" containerName="extract-utilities"
Jan 28 17:47:31 crc kubenswrapper[4877]: E0128 17:47:31.296965 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6220c90b-28e3-4bed-a6bf-7cccff4e2877" containerName="registry-server"
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.297026 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="6220c90b-28e3-4bed-a6bf-7cccff4e2877" containerName="registry-server"
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.297326 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="6220c90b-28e3-4bed-a6bf-7cccff4e2877" containerName="registry-server"
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.299293 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mpkl4"]
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.299552 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.428685 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8f2w\" (UniqueName: \"kubernetes.io/projected/64038024-d586-4be6-9b2a-22db765561b7-kube-api-access-p8f2w\") pod \"redhat-operators-mpkl4\" (UID: \"64038024-d586-4be6-9b2a-22db765561b7\") " pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.428987 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64038024-d586-4be6-9b2a-22db765561b7-utilities\") pod \"redhat-operators-mpkl4\" (UID: \"64038024-d586-4be6-9b2a-22db765561b7\") " pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.429183 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64038024-d586-4be6-9b2a-22db765561b7-catalog-content\") pod \"redhat-operators-mpkl4\" (UID: \"64038024-d586-4be6-9b2a-22db765561b7\") " pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.532715 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8f2w\" (UniqueName: \"kubernetes.io/projected/64038024-d586-4be6-9b2a-22db765561b7-kube-api-access-p8f2w\") pod \"redhat-operators-mpkl4\" (UID: \"64038024-d586-4be6-9b2a-22db765561b7\") " pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.532832 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64038024-d586-4be6-9b2a-22db765561b7-utilities\") pod \"redhat-operators-mpkl4\" (UID: \"64038024-d586-4be6-9b2a-22db765561b7\") " pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.532897 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64038024-d586-4be6-9b2a-22db765561b7-catalog-content\") pod \"redhat-operators-mpkl4\" (UID: \"64038024-d586-4be6-9b2a-22db765561b7\") " pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.533610 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64038024-d586-4be6-9b2a-22db765561b7-utilities\") pod \"redhat-operators-mpkl4\" (UID: \"64038024-d586-4be6-9b2a-22db765561b7\") " pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.533762 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64038024-d586-4be6-9b2a-22db765561b7-catalog-content\") pod \"redhat-operators-mpkl4\" (UID: \"64038024-d586-4be6-9b2a-22db765561b7\") " pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.563119 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8f2w\" (UniqueName: \"kubernetes.io/projected/64038024-d586-4be6-9b2a-22db765561b7-kube-api-access-p8f2w\") pod \"redhat-operators-mpkl4\" (UID: \"64038024-d586-4be6-9b2a-22db765561b7\") " pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:47:31 crc kubenswrapper[4877]: I0128 17:47:31.631725 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:47:32 crc kubenswrapper[4877]: I0128 17:47:32.293970 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mpkl4"]
Jan 28 17:47:32 crc kubenswrapper[4877]: I0128 17:47:32.734234 4877 generic.go:334] "Generic (PLEG): container finished" podID="64038024-d586-4be6-9b2a-22db765561b7" containerID="c622ff37b9d3fd9f946abbf60751be7b76a7c8a7a4b89c253f9927bdc042e994" exitCode=0
Jan 28 17:47:32 crc kubenswrapper[4877]: I0128 17:47:32.734343 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mpkl4" event={"ID":"64038024-d586-4be6-9b2a-22db765561b7","Type":"ContainerDied","Data":"c622ff37b9d3fd9f946abbf60751be7b76a7c8a7a4b89c253f9927bdc042e994"}
Jan 28 17:47:32 crc kubenswrapper[4877]: I0128 17:47:32.734610 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mpkl4" event={"ID":"64038024-d586-4be6-9b2a-22db765561b7","Type":"ContainerStarted","Data":"fb7e12bdd4f236907023f3521ebd87ef4ada8eb32f09a2c52b1a543b6f508d61"}
Jan 28 17:47:33 crc kubenswrapper[4877]: I0128 17:47:33.751058 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mpkl4" event={"ID":"64038024-d586-4be6-9b2a-22db765561b7","Type":"ContainerStarted","Data":"94841f3defd9c738e96b5bcfa4a621579c55635057ba96312759e4e4407f32ac"}
Jan 28 17:47:35 crc kubenswrapper[4877]: I0128 17:47:35.332018 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b"
Jan 28 17:47:35 crc kubenswrapper[4877]: E0128 17:47:35.333286 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:47:43 crc kubenswrapper[4877]: I0128 17:47:43.034226 4877 generic.go:334] "Generic (PLEG): container finished" podID="64038024-d586-4be6-9b2a-22db765561b7" containerID="94841f3defd9c738e96b5bcfa4a621579c55635057ba96312759e4e4407f32ac" exitCode=0
Jan 28 17:47:43 crc kubenswrapper[4877]: I0128 17:47:43.034484 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mpkl4" event={"ID":"64038024-d586-4be6-9b2a-22db765561b7","Type":"ContainerDied","Data":"94841f3defd9c738e96b5bcfa4a621579c55635057ba96312759e4e4407f32ac"}
Jan 28 17:47:44 crc kubenswrapper[4877]: I0128 17:47:44.116312 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mpkl4" event={"ID":"64038024-d586-4be6-9b2a-22db765561b7","Type":"ContainerStarted","Data":"b2c60aaec7efcc8a3686f7b76ced5e1f16280776862efa7722a8d658e6a76060"}
Jan 28 17:47:44 crc kubenswrapper[4877]: I0128 17:47:44.157208 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mpkl4" podStartSLOduration=2.291161161 podStartE2EDuration="13.157189681s" podCreationTimestamp="2026-01-28 17:47:31 +0000 UTC" firstStartedPulling="2026-01-28 17:47:32.736743038 +0000 UTC m=+4356.295069936" lastFinishedPulling="2026-01-28 17:47:43.602771558 +0000 UTC m=+4367.161098456" observedRunningTime="2026-01-28 17:47:44.153977684 +0000 UTC m=+4367.712304572" watchObservedRunningTime="2026-01-28 17:47:44.157189681 +0000 UTC m=+4367.715516569"
Jan 28 17:47:49 crc kubenswrapper[4877]: I0128 17:47:49.330393 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b"
Jan 28 17:47:49 crc kubenswrapper[4877]: E0128 17:47:49.331234 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:47:51 crc kubenswrapper[4877]: I0128 17:47:51.632528 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:47:51 crc kubenswrapper[4877]: I0128 17:47:51.633092 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:47:52 crc kubenswrapper[4877]: I0128 17:47:52.699285 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mpkl4" podUID="64038024-d586-4be6-9b2a-22db765561b7" containerName="registry-server" probeResult="failure" output=<
Jan 28 17:47:52 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 17:47:52 crc kubenswrapper[4877]: >
Jan 28 17:48:01 crc kubenswrapper[4877]: I0128 17:48:01.332732 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b"
Jan 28 17:48:01 crc kubenswrapper[4877]: E0128 17:48:01.335271 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:48:02 crc kubenswrapper[4877]: I0128 17:48:02.689798 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mpkl4" podUID="64038024-d586-4be6-9b2a-22db765561b7" containerName="registry-server" probeResult="failure" output=<
Jan 28 17:48:02 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 17:48:02 crc kubenswrapper[4877]: >
Jan 28 17:48:12 crc kubenswrapper[4877]: I0128 17:48:12.695509 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mpkl4" podUID="64038024-d586-4be6-9b2a-22db765561b7" containerName="registry-server" probeResult="failure" output=<
Jan 28 17:48:12 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 17:48:12 crc kubenswrapper[4877]: >
Jan 28 17:48:13 crc kubenswrapper[4877]: I0128 17:48:13.330460 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b"
Jan 28 17:48:13 crc kubenswrapper[4877]: E0128 17:48:13.330883 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:48:21 crc kubenswrapper[4877]: I0128 17:48:21.688727 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:48:21 crc kubenswrapper[4877]: I0128 17:48:21.754491 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:48:21 crc kubenswrapper[4877]: I0128 17:48:21.940921 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mpkl4"]
Jan 28 17:48:23 crc kubenswrapper[4877]: I0128 17:48:23.617743 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mpkl4" podUID="64038024-d586-4be6-9b2a-22db765561b7" containerName="registry-server" containerID="cri-o://b2c60aaec7efcc8a3686f7b76ced5e1f16280776862efa7722a8d658e6a76060" gracePeriod=2
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.281545 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.458407 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64038024-d586-4be6-9b2a-22db765561b7-utilities\") pod \"64038024-d586-4be6-9b2a-22db765561b7\" (UID: \"64038024-d586-4be6-9b2a-22db765561b7\") "
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.458531 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8f2w\" (UniqueName: \"kubernetes.io/projected/64038024-d586-4be6-9b2a-22db765561b7-kube-api-access-p8f2w\") pod \"64038024-d586-4be6-9b2a-22db765561b7\" (UID: \"64038024-d586-4be6-9b2a-22db765561b7\") "
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.458761 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64038024-d586-4be6-9b2a-22db765561b7-catalog-content\") pod \"64038024-d586-4be6-9b2a-22db765561b7\" (UID: \"64038024-d586-4be6-9b2a-22db765561b7\") "
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.459520 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64038024-d586-4be6-9b2a-22db765561b7-utilities" (OuterVolumeSpecName: "utilities") pod "64038024-d586-4be6-9b2a-22db765561b7" (UID: "64038024-d586-4be6-9b2a-22db765561b7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.459986 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64038024-d586-4be6-9b2a-22db765561b7-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.484910 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64038024-d586-4be6-9b2a-22db765561b7-kube-api-access-p8f2w" (OuterVolumeSpecName: "kube-api-access-p8f2w") pod "64038024-d586-4be6-9b2a-22db765561b7" (UID: "64038024-d586-4be6-9b2a-22db765561b7"). InnerVolumeSpecName "kube-api-access-p8f2w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.562417 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8f2w\" (UniqueName: \"kubernetes.io/projected/64038024-d586-4be6-9b2a-22db765561b7-kube-api-access-p8f2w\") on node \"crc\" DevicePath \"\""
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.616877 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64038024-d586-4be6-9b2a-22db765561b7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "64038024-d586-4be6-9b2a-22db765561b7" (UID: "64038024-d586-4be6-9b2a-22db765561b7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.629451 4877 generic.go:334] "Generic (PLEG): container finished" podID="64038024-d586-4be6-9b2a-22db765561b7" containerID="b2c60aaec7efcc8a3686f7b76ced5e1f16280776862efa7722a8d658e6a76060" exitCode=0
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.629524 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mpkl4"
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.629524 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mpkl4" event={"ID":"64038024-d586-4be6-9b2a-22db765561b7","Type":"ContainerDied","Data":"b2c60aaec7efcc8a3686f7b76ced5e1f16280776862efa7722a8d658e6a76060"}
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.629601 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mpkl4" event={"ID":"64038024-d586-4be6-9b2a-22db765561b7","Type":"ContainerDied","Data":"fb7e12bdd4f236907023f3521ebd87ef4ada8eb32f09a2c52b1a543b6f508d61"}
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.629625 4877 scope.go:117] "RemoveContainer" containerID="b2c60aaec7efcc8a3686f7b76ced5e1f16280776862efa7722a8d658e6a76060"
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.667370 4877 scope.go:117] "RemoveContainer" containerID="94841f3defd9c738e96b5bcfa4a621579c55635057ba96312759e4e4407f32ac"
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.669137 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64038024-d586-4be6-9b2a-22db765561b7-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.681405 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mpkl4"]
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.700733 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mpkl4"]
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.713359 4877 scope.go:117] "RemoveContainer" containerID="c622ff37b9d3fd9f946abbf60751be7b76a7c8a7a4b89c253f9927bdc042e994"
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.759745 4877 scope.go:117] "RemoveContainer" containerID="b2c60aaec7efcc8a3686f7b76ced5e1f16280776862efa7722a8d658e6a76060"
Jan 28 17:48:24 crc kubenswrapper[4877]: E0128 17:48:24.760989 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2c60aaec7efcc8a3686f7b76ced5e1f16280776862efa7722a8d658e6a76060\": container with ID starting with b2c60aaec7efcc8a3686f7b76ced5e1f16280776862efa7722a8d658e6a76060 not found: ID does not exist" containerID="b2c60aaec7efcc8a3686f7b76ced5e1f16280776862efa7722a8d658e6a76060"
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.761047 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2c60aaec7efcc8a3686f7b76ced5e1f16280776862efa7722a8d658e6a76060"} err="failed to get container status \"b2c60aaec7efcc8a3686f7b76ced5e1f16280776862efa7722a8d658e6a76060\": rpc error: code = NotFound desc = could not find container \"b2c60aaec7efcc8a3686f7b76ced5e1f16280776862efa7722a8d658e6a76060\": container with ID starting with b2c60aaec7efcc8a3686f7b76ced5e1f16280776862efa7722a8d658e6a76060 not found: ID does not exist"
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.761083 4877 scope.go:117] "RemoveContainer" containerID="94841f3defd9c738e96b5bcfa4a621579c55635057ba96312759e4e4407f32ac"
Jan 28 17:48:24 crc kubenswrapper[4877]: E0128 17:48:24.761653 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94841f3defd9c738e96b5bcfa4a621579c55635057ba96312759e4e4407f32ac\": container with ID starting with 94841f3defd9c738e96b5bcfa4a621579c55635057ba96312759e4e4407f32ac not found: ID does not exist" containerID="94841f3defd9c738e96b5bcfa4a621579c55635057ba96312759e4e4407f32ac"
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.761687 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94841f3defd9c738e96b5bcfa4a621579c55635057ba96312759e4e4407f32ac"} err="failed to get container status \"94841f3defd9c738e96b5bcfa4a621579c55635057ba96312759e4e4407f32ac\": rpc error: code = NotFound desc = could not find container \"94841f3defd9c738e96b5bcfa4a621579c55635057ba96312759e4e4407f32ac\": container with ID starting with 94841f3defd9c738e96b5bcfa4a621579c55635057ba96312759e4e4407f32ac not found: ID does not exist"
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.761708 4877 scope.go:117] "RemoveContainer" containerID="c622ff37b9d3fd9f946abbf60751be7b76a7c8a7a4b89c253f9927bdc042e994"
Jan 28 17:48:24 crc kubenswrapper[4877]: E0128 17:48:24.762196 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c622ff37b9d3fd9f946abbf60751be7b76a7c8a7a4b89c253f9927bdc042e994\": container with ID starting with c622ff37b9d3fd9f946abbf60751be7b76a7c8a7a4b89c253f9927bdc042e994 not found: ID does not exist" containerID="c622ff37b9d3fd9f946abbf60751be7b76a7c8a7a4b89c253f9927bdc042e994"
Jan 28 17:48:24 crc kubenswrapper[4877]: I0128 17:48:24.762227 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c622ff37b9d3fd9f946abbf60751be7b76a7c8a7a4b89c253f9927bdc042e994"} err="failed to get container status \"c622ff37b9d3fd9f946abbf60751be7b76a7c8a7a4b89c253f9927bdc042e994\": rpc error: code = NotFound desc = could not find container \"c622ff37b9d3fd9f946abbf60751be7b76a7c8a7a4b89c253f9927bdc042e994\": container with ID starting with c622ff37b9d3fd9f946abbf60751be7b76a7c8a7a4b89c253f9927bdc042e994 not found: ID does not exist"
Jan 28 17:48:25 crc kubenswrapper[4877]: I0128 17:48:25.332890 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b"
Jan 28 17:48:25 crc kubenswrapper[4877]: E0128 17:48:25.333192 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:48:25 crc kubenswrapper[4877]: I0128 17:48:25.346920 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64038024-d586-4be6-9b2a-22db765561b7" path="/var/lib/kubelet/pods/64038024-d586-4be6-9b2a-22db765561b7/volumes"
Jan 28 17:48:37 crc kubenswrapper[4877]: I0128 17:48:37.340914 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b"
Jan 28 17:48:37 crc kubenswrapper[4877]: E0128 17:48:37.341760 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:48:49 crc kubenswrapper[4877]: I0128 17:48:49.331363 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b"
Jan 28 17:48:49 crc kubenswrapper[4877]: E0128 17:48:49.335253 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:49:01 crc kubenswrapper[4877]: I0128 17:49:01.330371 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b"
Jan 28 17:49:01 crc kubenswrapper[4877]: E0128 17:49:01.331319 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:49:13 crc kubenswrapper[4877]: I0128 17:49:13.332448 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b"
Jan 28 17:49:13 crc kubenswrapper[4877]: E0128 17:49:13.333846 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:49:26 crc kubenswrapper[4877]: I0128 17:49:26.330408 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b"
Jan 28 17:49:26 crc kubenswrapper[4877]: E0128 17:49:26.331175 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:49:40 crc kubenswrapper[4877]: I0128 17:49:40.330980 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b"
Jan 28 17:49:40 crc kubenswrapper[4877]: E0128 17:49:40.332888 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:49:54 crc kubenswrapper[4877]: I0128 17:49:54.331568 4877 
scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b" Jan 28 17:49:54 crc kubenswrapper[4877]: E0128 17:49:54.332439 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:50:09 crc kubenswrapper[4877]: I0128 17:50:09.335269 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b" Jan 28 17:50:09 crc kubenswrapper[4877]: E0128 17:50:09.336777 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:50:24 crc kubenswrapper[4877]: I0128 17:50:24.330848 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b" Jan 28 17:50:24 crc kubenswrapper[4877]: E0128 17:50:24.331760 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:50:36 crc kubenswrapper[4877]: I0128 17:50:36.331890 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b" Jan 28 17:50:36 crc kubenswrapper[4877]: E0128 17:50:36.332641 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:50:50 crc kubenswrapper[4877]: I0128 17:50:50.338431 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b" Jan 28 17:50:50 crc kubenswrapper[4877]: E0128 17:50:50.340440 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:51:02 crc kubenswrapper[4877]: I0128 17:51:02.330826 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b" Jan 28 17:51:02 crc kubenswrapper[4877]: E0128 17:51:02.331654 4877 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:51:16 crc kubenswrapper[4877]: I0128 17:51:16.332709 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b" Jan 28 17:51:16 crc kubenswrapper[4877]: E0128 17:51:16.334377 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:51:29 crc kubenswrapper[4877]: I0128 17:51:29.331533 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b" Jan 28 17:51:29 crc kubenswrapper[4877]: E0128 17:51:29.332535 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:51:37 crc kubenswrapper[4877]: I0128 17:51:37.065000 4877 patch_prober.go:28] interesting pod/metrics-server-fbbd74554-qkt8l container/metrics-server namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.75:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 17:51:37 crc kubenswrapper[4877]: I0128 17:51:37.065533 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" podUID="7829fe04-318e-4cda-adb5-4109e6d6f751" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.75:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 17:51:41 crc kubenswrapper[4877]: I0128 17:51:41.330391 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b" Jan 28 17:51:41 crc kubenswrapper[4877]: E0128 17:51:41.331208 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:51:55 crc kubenswrapper[4877]: I0128 17:51:55.335970 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b" Jan 28 17:51:55 crc kubenswrapper[4877]: E0128 17:51:55.336882 4877 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 17:52:07 crc kubenswrapper[4877]: I0128 17:52:07.330698 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b" Jan 28 17:52:07 crc kubenswrapper[4877]: I0128 17:52:07.927356 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"f2a7976769827c4b163e30fcd691bce52645f79c730e8a24b73c81dabc882f92"} Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.031904 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5wggd"] Jan 28 17:52:18 crc kubenswrapper[4877]: E0128 17:52:18.033425 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64038024-d586-4be6-9b2a-22db765561b7" containerName="extract-utilities" Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.033444 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="64038024-d586-4be6-9b2a-22db765561b7" containerName="extract-utilities" Jan 28 17:52:18 crc kubenswrapper[4877]: E0128 17:52:18.033495 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64038024-d586-4be6-9b2a-22db765561b7" containerName="registry-server" Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.033504 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="64038024-d586-4be6-9b2a-22db765561b7" containerName="registry-server" Jan 28 17:52:18 crc kubenswrapper[4877]: E0128 17:52:18.033532 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64038024-d586-4be6-9b2a-22db765561b7" containerName="extract-content" Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.033541 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="64038024-d586-4be6-9b2a-22db765561b7" containerName="extract-content" Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.033817 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="64038024-d586-4be6-9b2a-22db765561b7" containerName="registry-server" Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.036042 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.049718 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5wggd"] Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.182945 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qr9xk\" (UniqueName: \"kubernetes.io/projected/1d3721ee-fb0f-42b9-902e-67be8ddf520e-kube-api-access-qr9xk\") pod \"redhat-marketplace-5wggd\" (UID: \"1d3721ee-fb0f-42b9-902e-67be8ddf520e\") " pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.183002 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d3721ee-fb0f-42b9-902e-67be8ddf520e-utilities\") pod \"redhat-marketplace-5wggd\" (UID: \"1d3721ee-fb0f-42b9-902e-67be8ddf520e\") " pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.183069 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d3721ee-fb0f-42b9-902e-67be8ddf520e-catalog-content\") pod \"redhat-marketplace-5wggd\" (UID: \"1d3721ee-fb0f-42b9-902e-67be8ddf520e\") " pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.285658 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qr9xk\" (UniqueName: \"kubernetes.io/projected/1d3721ee-fb0f-42b9-902e-67be8ddf520e-kube-api-access-qr9xk\") pod \"redhat-marketplace-5wggd\" (UID: \"1d3721ee-fb0f-42b9-902e-67be8ddf520e\") " pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.286038 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d3721ee-fb0f-42b9-902e-67be8ddf520e-utilities\") pod \"redhat-marketplace-5wggd\" (UID: \"1d3721ee-fb0f-42b9-902e-67be8ddf520e\") " pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.286141 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d3721ee-fb0f-42b9-902e-67be8ddf520e-catalog-content\") pod \"redhat-marketplace-5wggd\" (UID: \"1d3721ee-fb0f-42b9-902e-67be8ddf520e\") " pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.286699 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d3721ee-fb0f-42b9-902e-67be8ddf520e-utilities\") pod \"redhat-marketplace-5wggd\" (UID: \"1d3721ee-fb0f-42b9-902e-67be8ddf520e\") " pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.286761 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d3721ee-fb0f-42b9-902e-67be8ddf520e-catalog-content\") pod \"redhat-marketplace-5wggd\" (UID: \"1d3721ee-fb0f-42b9-902e-67be8ddf520e\") " pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.319658 4877 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-qr9xk\" (UniqueName: \"kubernetes.io/projected/1d3721ee-fb0f-42b9-902e-67be8ddf520e-kube-api-access-qr9xk\") pod \"redhat-marketplace-5wggd\" (UID: \"1d3721ee-fb0f-42b9-902e-67be8ddf520e\") " pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.367124 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:18 crc kubenswrapper[4877]: I0128 17:52:18.970755 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5wggd"] Jan 28 17:52:19 crc kubenswrapper[4877]: I0128 17:52:19.053587 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5wggd" event={"ID":"1d3721ee-fb0f-42b9-902e-67be8ddf520e","Type":"ContainerStarted","Data":"365007d639eef40e32b354859a5c9f9115f159aba3fd52eff4d1d9d3e1961a0a"} Jan 28 17:52:20 crc kubenswrapper[4877]: I0128 17:52:20.067434 4877 generic.go:334] "Generic (PLEG): container finished" podID="1d3721ee-fb0f-42b9-902e-67be8ddf520e" containerID="d7411c1efb8959eb24e2ac7179186b5f28a4e9e8e613b28edf253b49cae4575a" exitCode=0 Jan 28 17:52:20 crc kubenswrapper[4877]: I0128 17:52:20.067549 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5wggd" event={"ID":"1d3721ee-fb0f-42b9-902e-67be8ddf520e","Type":"ContainerDied","Data":"d7411c1efb8959eb24e2ac7179186b5f28a4e9e8e613b28edf253b49cae4575a"} Jan 28 17:52:20 crc kubenswrapper[4877]: I0128 17:52:20.070953 4877 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 17:52:22 crc kubenswrapper[4877]: I0128 17:52:22.099038 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5wggd" event={"ID":"1d3721ee-fb0f-42b9-902e-67be8ddf520e","Type":"ContainerStarted","Data":"7d1a07fa4f48e21fc63e6a7f04987e2c3e30b667cb462cf1d9bc9ddd33b20812"} Jan 28 17:52:24 crc kubenswrapper[4877]: I0128 17:52:24.122832 4877 generic.go:334] "Generic (PLEG): container finished" podID="1d3721ee-fb0f-42b9-902e-67be8ddf520e" containerID="7d1a07fa4f48e21fc63e6a7f04987e2c3e30b667cb462cf1d9bc9ddd33b20812" exitCode=0 Jan 28 17:52:24 crc kubenswrapper[4877]: I0128 17:52:24.122932 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5wggd" event={"ID":"1d3721ee-fb0f-42b9-902e-67be8ddf520e","Type":"ContainerDied","Data":"7d1a07fa4f48e21fc63e6a7f04987e2c3e30b667cb462cf1d9bc9ddd33b20812"} Jan 28 17:52:26 crc kubenswrapper[4877]: I0128 17:52:26.146195 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5wggd" event={"ID":"1d3721ee-fb0f-42b9-902e-67be8ddf520e","Type":"ContainerStarted","Data":"019545e756a785109dc6bf3aa878488fb1dc20218cf2778ba89cfa02f6972e5a"} Jan 28 17:52:26 crc kubenswrapper[4877]: I0128 17:52:26.167176 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5wggd" podStartSLOduration=3.339636342 podStartE2EDuration="8.167158424s" podCreationTimestamp="2026-01-28 17:52:18 +0000 UTC" firstStartedPulling="2026-01-28 17:52:20.070749441 +0000 UTC m=+4643.629076329" lastFinishedPulling="2026-01-28 17:52:24.898271513 +0000 UTC m=+4648.456598411" observedRunningTime="2026-01-28 17:52:26.166385604 +0000 UTC m=+4649.724712512" watchObservedRunningTime="2026-01-28 17:52:26.167158424 +0000 UTC 
m=+4649.725485312" Jan 28 17:52:28 crc kubenswrapper[4877]: I0128 17:52:28.367754 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:28 crc kubenswrapper[4877]: I0128 17:52:28.368185 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:28 crc kubenswrapper[4877]: I0128 17:52:28.423192 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:38 crc kubenswrapper[4877]: I0128 17:52:38.425374 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:38 crc kubenswrapper[4877]: I0128 17:52:38.488254 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5wggd"] Jan 28 17:52:39 crc kubenswrapper[4877]: I0128 17:52:39.287429 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5wggd" podUID="1d3721ee-fb0f-42b9-902e-67be8ddf520e" containerName="registry-server" containerID="cri-o://019545e756a785109dc6bf3aa878488fb1dc20218cf2778ba89cfa02f6972e5a" gracePeriod=2 Jan 28 17:52:39 crc kubenswrapper[4877]: I0128 17:52:39.899824 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.018536 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d3721ee-fb0f-42b9-902e-67be8ddf520e-catalog-content\") pod \"1d3721ee-fb0f-42b9-902e-67be8ddf520e\" (UID: \"1d3721ee-fb0f-42b9-902e-67be8ddf520e\") " Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.018574 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d3721ee-fb0f-42b9-902e-67be8ddf520e-utilities\") pod \"1d3721ee-fb0f-42b9-902e-67be8ddf520e\" (UID: \"1d3721ee-fb0f-42b9-902e-67be8ddf520e\") " Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.018629 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qr9xk\" (UniqueName: \"kubernetes.io/projected/1d3721ee-fb0f-42b9-902e-67be8ddf520e-kube-api-access-qr9xk\") pod \"1d3721ee-fb0f-42b9-902e-67be8ddf520e\" (UID: \"1d3721ee-fb0f-42b9-902e-67be8ddf520e\") " Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.019705 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d3721ee-fb0f-42b9-902e-67be8ddf520e-utilities" (OuterVolumeSpecName: "utilities") pod "1d3721ee-fb0f-42b9-902e-67be8ddf520e" (UID: "1d3721ee-fb0f-42b9-902e-67be8ddf520e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.027434 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d3721ee-fb0f-42b9-902e-67be8ddf520e-kube-api-access-qr9xk" (OuterVolumeSpecName: "kube-api-access-qr9xk") pod "1d3721ee-fb0f-42b9-902e-67be8ddf520e" (UID: "1d3721ee-fb0f-42b9-902e-67be8ddf520e"). InnerVolumeSpecName "kube-api-access-qr9xk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.042669 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d3721ee-fb0f-42b9-902e-67be8ddf520e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d3721ee-fb0f-42b9-902e-67be8ddf520e" (UID: "1d3721ee-fb0f-42b9-902e-67be8ddf520e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.122388 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d3721ee-fb0f-42b9-902e-67be8ddf520e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.122783 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d3721ee-fb0f-42b9-902e-67be8ddf520e-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.122797 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qr9xk\" (UniqueName: \"kubernetes.io/projected/1d3721ee-fb0f-42b9-902e-67be8ddf520e-kube-api-access-qr9xk\") on node \"crc\" DevicePath \"\"" Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.302293 4877 generic.go:334] "Generic (PLEG): container finished" podID="1d3721ee-fb0f-42b9-902e-67be8ddf520e" containerID="019545e756a785109dc6bf3aa878488fb1dc20218cf2778ba89cfa02f6972e5a" exitCode=0 Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.302356 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5wggd" Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.302355 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5wggd" event={"ID":"1d3721ee-fb0f-42b9-902e-67be8ddf520e","Type":"ContainerDied","Data":"019545e756a785109dc6bf3aa878488fb1dc20218cf2778ba89cfa02f6972e5a"} Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.302525 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5wggd" event={"ID":"1d3721ee-fb0f-42b9-902e-67be8ddf520e","Type":"ContainerDied","Data":"365007d639eef40e32b354859a5c9f9115f159aba3fd52eff4d1d9d3e1961a0a"} Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.302565 4877 scope.go:117] "RemoveContainer" containerID="019545e756a785109dc6bf3aa878488fb1dc20218cf2778ba89cfa02f6972e5a" Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.338563 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5wggd"] Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.350721 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5wggd"] Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.352412 4877 scope.go:117] "RemoveContainer" containerID="7d1a07fa4f48e21fc63e6a7f04987e2c3e30b667cb462cf1d9bc9ddd33b20812" Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.373729 4877 scope.go:117] "RemoveContainer" containerID="d7411c1efb8959eb24e2ac7179186b5f28a4e9e8e613b28edf253b49cae4575a" Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.437164 4877 scope.go:117] "RemoveContainer" containerID="019545e756a785109dc6bf3aa878488fb1dc20218cf2778ba89cfa02f6972e5a" Jan 28 17:52:40 crc kubenswrapper[4877]: E0128 17:52:40.437673 4877 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"019545e756a785109dc6bf3aa878488fb1dc20218cf2778ba89cfa02f6972e5a\": container with ID starting with 019545e756a785109dc6bf3aa878488fb1dc20218cf2778ba89cfa02f6972e5a not found: ID does not exist" containerID="019545e756a785109dc6bf3aa878488fb1dc20218cf2778ba89cfa02f6972e5a" Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.437719 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"019545e756a785109dc6bf3aa878488fb1dc20218cf2778ba89cfa02f6972e5a"} err="failed to get container status \"019545e756a785109dc6bf3aa878488fb1dc20218cf2778ba89cfa02f6972e5a\": rpc error: code = NotFound desc = could not find container \"019545e756a785109dc6bf3aa878488fb1dc20218cf2778ba89cfa02f6972e5a\": container with ID starting with 019545e756a785109dc6bf3aa878488fb1dc20218cf2778ba89cfa02f6972e5a not found: ID does not exist" Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.437750 4877 scope.go:117] "RemoveContainer" containerID="7d1a07fa4f48e21fc63e6a7f04987e2c3e30b667cb462cf1d9bc9ddd33b20812" Jan 28 17:52:40 crc kubenswrapper[4877]: E0128 17:52:40.438366 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d1a07fa4f48e21fc63e6a7f04987e2c3e30b667cb462cf1d9bc9ddd33b20812\": container with ID starting with 7d1a07fa4f48e21fc63e6a7f04987e2c3e30b667cb462cf1d9bc9ddd33b20812 not found: ID does not exist" containerID="7d1a07fa4f48e21fc63e6a7f04987e2c3e30b667cb462cf1d9bc9ddd33b20812" Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.438447 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d1a07fa4f48e21fc63e6a7f04987e2c3e30b667cb462cf1d9bc9ddd33b20812"} err="failed to get container status \"7d1a07fa4f48e21fc63e6a7f04987e2c3e30b667cb462cf1d9bc9ddd33b20812\": rpc error: code = NotFound desc = could not find container \"7d1a07fa4f48e21fc63e6a7f04987e2c3e30b667cb462cf1d9bc9ddd33b20812\": container with ID starting with 7d1a07fa4f48e21fc63e6a7f04987e2c3e30b667cb462cf1d9bc9ddd33b20812 not found: ID does not exist" Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.438660 4877 scope.go:117] "RemoveContainer" containerID="d7411c1efb8959eb24e2ac7179186b5f28a4e9e8e613b28edf253b49cae4575a" Jan 28 17:52:40 crc kubenswrapper[4877]: E0128 17:52:40.439185 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7411c1efb8959eb24e2ac7179186b5f28a4e9e8e613b28edf253b49cae4575a\": container with ID starting with d7411c1efb8959eb24e2ac7179186b5f28a4e9e8e613b28edf253b49cae4575a not found: ID does not exist" containerID="d7411c1efb8959eb24e2ac7179186b5f28a4e9e8e613b28edf253b49cae4575a" Jan 28 17:52:40 crc kubenswrapper[4877]: I0128 17:52:40.439250 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7411c1efb8959eb24e2ac7179186b5f28a4e9e8e613b28edf253b49cae4575a"} err="failed to get container status \"d7411c1efb8959eb24e2ac7179186b5f28a4e9e8e613b28edf253b49cae4575a\": rpc error: code = NotFound desc = could not find container \"d7411c1efb8959eb24e2ac7179186b5f28a4e9e8e613b28edf253b49cae4575a\": container with ID starting with d7411c1efb8959eb24e2ac7179186b5f28a4e9e8e613b28edf253b49cae4575a not found: ID does not exist" Jan 28 17:52:41 crc kubenswrapper[4877]: I0128 17:52:41.344881 4877 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="1d3721ee-fb0f-42b9-902e-67be8ddf520e" path="/var/lib/kubelet/pods/1d3721ee-fb0f-42b9-902e-67be8ddf520e/volumes" Jan 28 17:54:07 crc kubenswrapper[4877]: I0128 17:54:07.076583 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:54:07 crc kubenswrapper[4877]: I0128 17:54:07.077987 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:54:37 crc kubenswrapper[4877]: I0128 17:54:37.076446 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:54:37 crc kubenswrapper[4877]: I0128 17:54:37.076965 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:55:07 crc kubenswrapper[4877]: I0128 17:55:07.076920 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:55:07 crc kubenswrapper[4877]: I0128 17:55:07.077501 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:55:07 crc kubenswrapper[4877]: I0128 17:55:07.077560 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 17:55:07 crc kubenswrapper[4877]: I0128 17:55:07.078651 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f2a7976769827c4b163e30fcd691bce52645f79c730e8a24b73c81dabc882f92"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 17:55:07 crc kubenswrapper[4877]: I0128 17:55:07.078851 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" containerID="cri-o://f2a7976769827c4b163e30fcd691bce52645f79c730e8a24b73c81dabc882f92" gracePeriod=600 Jan 28 17:55:08 crc kubenswrapper[4877]: I0128 17:55:08.013933 4877 generic.go:334] "Generic (PLEG): container finished" 
podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="f2a7976769827c4b163e30fcd691bce52645f79c730e8a24b73c81dabc882f92" exitCode=0 Jan 28 17:55:08 crc kubenswrapper[4877]: I0128 17:55:08.014338 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"f2a7976769827c4b163e30fcd691bce52645f79c730e8a24b73c81dabc882f92"} Jan 28 17:55:08 crc kubenswrapper[4877]: I0128 17:55:08.014425 4877 scope.go:117] "RemoveContainer" containerID="ce2729a41a255c7045ba5451d1067fa8321dfd624fede9c64fe3a9ea0b3a656b" Jan 28 17:55:09 crc kubenswrapper[4877]: I0128 17:55:09.029346 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"} Jan 28 17:57:37 crc kubenswrapper[4877]: I0128 17:57:37.076410 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 17:57:37 crc kubenswrapper[4877]: I0128 17:57:37.076982 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.340235 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Jan 28 17:57:46 crc kubenswrapper[4877]: E0128 17:57:46.341616 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d3721ee-fb0f-42b9-902e-67be8ddf520e" containerName="extract-content" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.341641 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d3721ee-fb0f-42b9-902e-67be8ddf520e" containerName="extract-content" Jan 28 17:57:46 crc kubenswrapper[4877]: E0128 17:57:46.341676 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d3721ee-fb0f-42b9-902e-67be8ddf520e" containerName="registry-server" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.341686 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d3721ee-fb0f-42b9-902e-67be8ddf520e" containerName="registry-server" Jan 28 17:57:46 crc kubenswrapper[4877]: E0128 17:57:46.341750 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d3721ee-fb0f-42b9-902e-67be8ddf520e" containerName="extract-utilities" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.341763 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d3721ee-fb0f-42b9-902e-67be8ddf520e" containerName="extract-utilities" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.342146 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d3721ee-fb0f-42b9-902e-67be8ddf520e" containerName="registry-server" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.343451 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.345380 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.345947 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.345947 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-m5q9r" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.346416 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.352770 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.464932 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/979f7960-d78e-4f7d-b68b-757e70ac5378-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.465028 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.465072 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/979f7960-d78e-4f7d-b68b-757e70ac5378-config-data\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.465101 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/979f7960-d78e-4f7d-b68b-757e70ac5378-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.465350 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.465399 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/979f7960-d78e-4f7d-b68b-757e70ac5378-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.465614 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6vxf\" (UniqueName: 
\"kubernetes.io/projected/979f7960-d78e-4f7d-b68b-757e70ac5378-kube-api-access-v6vxf\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.465678 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.465826 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.568794 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/979f7960-d78e-4f7d-b68b-757e70ac5378-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.568867 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.568898 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/979f7960-d78e-4f7d-b68b-757e70ac5378-config-data\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.568918 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/979f7960-d78e-4f7d-b68b-757e70ac5378-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.569008 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.569036 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/979f7960-d78e-4f7d-b68b-757e70ac5378-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.569091 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6vxf\" (UniqueName: \"kubernetes.io/projected/979f7960-d78e-4f7d-b68b-757e70ac5378-kube-api-access-v6vxf\") pod 
\"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.569126 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.569176 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.570956 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/979f7960-d78e-4f7d-b68b-757e70ac5378-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.571134 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/979f7960-d78e-4f7d-b68b-757e70ac5378-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.571648 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/979f7960-d78e-4f7d-b68b-757e70ac5378-config-data\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.571708 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/979f7960-d78e-4f7d-b68b-757e70ac5378-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.573153 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.576100 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.576246 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest" Jan 28 17:57:46 crc 
kubenswrapper[4877]: I0128 17:57:46.580146 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest"
Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.591437 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6vxf\" (UniqueName: \"kubernetes.io/projected/979f7960-d78e-4f7d-b68b-757e70ac5378-kube-api-access-v6vxf\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest"
Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.612127 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"tempest-tests-tempest\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") " pod="openstack/tempest-tests-tempest"
Jan 28 17:57:46 crc kubenswrapper[4877]: I0128 17:57:46.668834 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Jan 28 17:57:47 crc kubenswrapper[4877]: I0128 17:57:47.565995 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Jan 28 17:57:47 crc kubenswrapper[4877]: I0128 17:57:47.582153 4877 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 28 17:57:47 crc kubenswrapper[4877]: I0128 17:57:47.826500 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"979f7960-d78e-4f7d-b68b-757e70ac5378","Type":"ContainerStarted","Data":"2c935c665f00dd76f04f203bd82cc568e3029ff8016a7d609d2339581936bbee"}
Jan 28 17:57:54 crc kubenswrapper[4877]: I0128 17:57:54.220835 4877 trace.go:236] Trace[167647937]: "Calculate volume metrics of catalog-content for pod openshift-marketplace/community-operators-s8fsq" (28-Jan-2026 17:57:52.828) (total time: 1361ms):
Jan 28 17:57:54 crc kubenswrapper[4877]: Trace[167647937]: [1.361354987s] [1.361354987s] END
Jan 28 17:58:07 crc kubenswrapper[4877]: I0128 17:58:07.075964 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 17:58:07 crc kubenswrapper[4877]: I0128 17:58:07.076515 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 17:58:08 crc kubenswrapper[4877]: I0128 17:58:08.088155 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-nrw7c"]
Jan 28 17:58:08 crc kubenswrapper[4877]: I0128 17:58:08.091537 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 17:58:08 crc kubenswrapper[4877]: I0128 17:58:08.104746 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nrw7c"]
Jan 28 17:58:08 crc kubenswrapper[4877]: I0128 17:58:08.162922 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67525667-217c-4c1c-b182-6dffcc277ff6-utilities\") pod \"community-operators-nrw7c\" (UID: \"67525667-217c-4c1c-b182-6dffcc277ff6\") " pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 17:58:08 crc kubenswrapper[4877]: I0128 17:58:08.163003 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jglh7\" (UniqueName: \"kubernetes.io/projected/67525667-217c-4c1c-b182-6dffcc277ff6-kube-api-access-jglh7\") pod \"community-operators-nrw7c\" (UID: \"67525667-217c-4c1c-b182-6dffcc277ff6\") " pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 17:58:08 crc kubenswrapper[4877]: I0128 17:58:08.163029 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67525667-217c-4c1c-b182-6dffcc277ff6-catalog-content\") pod \"community-operators-nrw7c\" (UID: \"67525667-217c-4c1c-b182-6dffcc277ff6\") " pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 17:58:08 crc kubenswrapper[4877]: I0128 17:58:08.265638 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67525667-217c-4c1c-b182-6dffcc277ff6-utilities\") pod \"community-operators-nrw7c\" (UID: \"67525667-217c-4c1c-b182-6dffcc277ff6\") " pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 17:58:08 crc kubenswrapper[4877]: I0128 17:58:08.265743 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jglh7\" (UniqueName: \"kubernetes.io/projected/67525667-217c-4c1c-b182-6dffcc277ff6-kube-api-access-jglh7\") pod \"community-operators-nrw7c\" (UID: \"67525667-217c-4c1c-b182-6dffcc277ff6\") " pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 17:58:08 crc kubenswrapper[4877]: I0128 17:58:08.265775 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67525667-217c-4c1c-b182-6dffcc277ff6-catalog-content\") pod \"community-operators-nrw7c\" (UID: \"67525667-217c-4c1c-b182-6dffcc277ff6\") " pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 17:58:08 crc kubenswrapper[4877]: I0128 17:58:08.266242 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67525667-217c-4c1c-b182-6dffcc277ff6-utilities\") pod \"community-operators-nrw7c\" (UID: \"67525667-217c-4c1c-b182-6dffcc277ff6\") " pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 17:58:08 crc kubenswrapper[4877]: I0128 17:58:08.266685 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67525667-217c-4c1c-b182-6dffcc277ff6-catalog-content\") pod \"community-operators-nrw7c\" (UID: \"67525667-217c-4c1c-b182-6dffcc277ff6\") " pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 17:58:08 crc kubenswrapper[4877]: I0128 17:58:08.855601 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jglh7\" (UniqueName: \"kubernetes.io/projected/67525667-217c-4c1c-b182-6dffcc277ff6-kube-api-access-jglh7\") pod \"community-operators-nrw7c\" (UID: \"67525667-217c-4c1c-b182-6dffcc277ff6\") " pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 17:58:09 crc kubenswrapper[4877]: I0128 17:58:09.023173 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 17:58:11 crc kubenswrapper[4877]: I0128 17:58:11.547384 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9lmsk"]
Jan 28 17:58:11 crc kubenswrapper[4877]: I0128 17:58:11.553309 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 17:58:11 crc kubenswrapper[4877]: I0128 17:58:11.561902 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9lmsk"]
Jan 28 17:58:11 crc kubenswrapper[4877]: I0128 17:58:11.679839 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-utilities\") pod \"redhat-operators-9lmsk\" (UID: \"62733c9a-12dc-4bfc-88c7-06b6cec26fc3\") " pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 17:58:11 crc kubenswrapper[4877]: I0128 17:58:11.680170 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lg8b4\" (UniqueName: \"kubernetes.io/projected/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-kube-api-access-lg8b4\") pod \"redhat-operators-9lmsk\" (UID: \"62733c9a-12dc-4bfc-88c7-06b6cec26fc3\") " pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 17:58:11 crc kubenswrapper[4877]: I0128 17:58:11.680819 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-catalog-content\") pod \"redhat-operators-9lmsk\" (UID: \"62733c9a-12dc-4bfc-88c7-06b6cec26fc3\") " pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 17:58:11 crc kubenswrapper[4877]: I0128 17:58:11.783160 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-utilities\") pod \"redhat-operators-9lmsk\" (UID: \"62733c9a-12dc-4bfc-88c7-06b6cec26fc3\") " pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 17:58:11 crc kubenswrapper[4877]: I0128 17:58:11.783469 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lg8b4\" (UniqueName: \"kubernetes.io/projected/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-kube-api-access-lg8b4\") pod \"redhat-operators-9lmsk\" (UID: \"62733c9a-12dc-4bfc-88c7-06b6cec26fc3\") " pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 17:58:11 crc kubenswrapper[4877]: I0128 17:58:11.783742 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-catalog-content\") pod \"redhat-operators-9lmsk\" (UID: \"62733c9a-12dc-4bfc-88c7-06b6cec26fc3\") " pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 17:58:11 crc kubenswrapper[4877]: I0128 17:58:11.784219 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-catalog-content\") pod \"redhat-operators-9lmsk\" (UID: \"62733c9a-12dc-4bfc-88c7-06b6cec26fc3\") " pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 17:58:11 crc kubenswrapper[4877]: I0128 17:58:11.784231 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-utilities\") pod \"redhat-operators-9lmsk\" (UID: \"62733c9a-12dc-4bfc-88c7-06b6cec26fc3\") " pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 17:58:11 crc kubenswrapper[4877]: I0128 17:58:11.814214 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lg8b4\" (UniqueName: \"kubernetes.io/projected/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-kube-api-access-lg8b4\") pod \"redhat-operators-9lmsk\" (UID: \"62733c9a-12dc-4bfc-88c7-06b6cec26fc3\") " pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 17:58:11 crc kubenswrapper[4877]: I0128 17:58:11.900070 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 17:58:37 crc kubenswrapper[4877]: I0128 17:58:37.076111 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 17:58:37 crc kubenswrapper[4877]: I0128 17:58:37.076627 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 17:58:37 crc kubenswrapper[4877]: I0128 17:58:37.076686 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm"
Jan 28 17:58:37 crc kubenswrapper[4877]: I0128 17:58:37.078561 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 17:58:37 crc kubenswrapper[4877]: I0128 17:58:37.078637 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" containerID="cri-o://9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a" gracePeriod=600
Jan 28 17:58:37 crc kubenswrapper[4877]: I0128 17:58:37.454318 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a" exitCode=0
Jan 28 17:58:37 crc kubenswrapper[4877]: I0128 17:58:37.454536 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"}
Jan 28 17:58:37 crc kubenswrapper[4877]: I0128 17:58:37.454733 4877 scope.go:117] "RemoveContainer" containerID="f2a7976769827c4b163e30fcd691bce52645f79c730e8a24b73c81dabc882f92"
Jan 28 17:58:48 crc kubenswrapper[4877]: E0128 17:58:48.125268 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/rpm-ostreed.service\": RecentStats: unable to find data in memory cache]"
Jan 28 17:58:48 crc kubenswrapper[4877]: E0128 17:58:48.244338 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:58:48 crc kubenswrapper[4877]: E0128 17:58:48.417373 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified"
Jan 28 17:58:48 crc kubenswrapper[4877]: E0128 17:58:48.436895 4877 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v6vxf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(979f7960-d78e-4f7d-b68b-757e70ac5378): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 17:58:48 crc kubenswrapper[4877]: E0128 17:58:48.438108 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" podUID="979f7960-d78e-4f7d-b68b-757e70ac5378"
Jan 28 17:58:48 crc kubenswrapper[4877]: I0128 17:58:48.608131 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"
Jan 28 17:58:48 crc kubenswrapper[4877]: E0128 17:58:48.608448 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:58:48 crc kubenswrapper[4877]: E0128 17:58:48.608926 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="979f7960-d78e-4f7d-b68b-757e70ac5378"
Jan 28 17:58:49 crc kubenswrapper[4877]: I0128 17:58:49.694657 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nrw7c"]
Jan 28 17:58:49 crc kubenswrapper[4877]: I0128 17:58:49.707689 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9lmsk"]
Jan 28 17:58:50 crc kubenswrapper[4877]: I0128 17:58:50.643022 4877 generic.go:334] "Generic (PLEG): container finished" podID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerID="7f91075cf2fd2d1c9bfd42c0a78fce631b4943a2aec863ff73da1297a04abd3d" exitCode=0
Jan 28 17:58:50 crc kubenswrapper[4877]: I0128 17:58:50.643581 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lmsk" event={"ID":"62733c9a-12dc-4bfc-88c7-06b6cec26fc3","Type":"ContainerDied","Data":"7f91075cf2fd2d1c9bfd42c0a78fce631b4943a2aec863ff73da1297a04abd3d"}
Jan 28 17:58:50 crc kubenswrapper[4877]: I0128 17:58:50.643754 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lmsk" event={"ID":"62733c9a-12dc-4bfc-88c7-06b6cec26fc3","Type":"ContainerStarted","Data":"4d63a2f783f7e614eedd682e19cf2d3110394455e88a89e9fd299074d9c2fb81"}
Jan 28 17:58:50 crc kubenswrapper[4877]: I0128 17:58:50.649370 4877 generic.go:334] "Generic (PLEG): container finished" podID="67525667-217c-4c1c-b182-6dffcc277ff6" containerID="ee7949b510ce372591874198db8742684fb18020ab7a7ad6f72351fb345ca8c5" exitCode=0
Jan 28 17:58:50 crc kubenswrapper[4877]: I0128 17:58:50.649425 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrw7c" event={"ID":"67525667-217c-4c1c-b182-6dffcc277ff6","Type":"ContainerDied","Data":"ee7949b510ce372591874198db8742684fb18020ab7a7ad6f72351fb345ca8c5"}
Jan 28 17:58:50 crc kubenswrapper[4877]: I0128 17:58:50.649458 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrw7c" event={"ID":"67525667-217c-4c1c-b182-6dffcc277ff6","Type":"ContainerStarted","Data":"dc98338be3a969725ca53117a46c6917637caf3759b0bd989cbf7b4e8779ba5c"}
Jan 28 17:58:52 crc kubenswrapper[4877]: I0128 17:58:52.673411 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lmsk" event={"ID":"62733c9a-12dc-4bfc-88c7-06b6cec26fc3","Type":"ContainerStarted","Data":"86b58765adba59cec0c76c3b343cf4dec07956b04467203cedbce9c55e07a440"}
Jan 28 17:58:52 crc kubenswrapper[4877]: I0128 17:58:52.676617 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrw7c" event={"ID":"67525667-217c-4c1c-b182-6dffcc277ff6","Type":"ContainerStarted","Data":"ded15300946f99a7205ec463663e1b4787a9d0e3c94a80d48e732f543af00b0a"}
Jan 28 17:59:03 crc kubenswrapper[4877]: I0128 17:59:03.809860 4877 generic.go:334] "Generic (PLEG): container finished" podID="67525667-217c-4c1c-b182-6dffcc277ff6" containerID="ded15300946f99a7205ec463663e1b4787a9d0e3c94a80d48e732f543af00b0a" exitCode=0
Jan 28 17:59:03 crc kubenswrapper[4877]: I0128 17:59:03.810314 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrw7c" event={"ID":"67525667-217c-4c1c-b182-6dffcc277ff6","Type":"ContainerDied","Data":"ded15300946f99a7205ec463663e1b4787a9d0e3c94a80d48e732f543af00b0a"}
Jan 28 17:59:04 crc kubenswrapper[4877]: I0128 17:59:04.331203 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"
Jan 28 17:59:04 crc kubenswrapper[4877]: E0128 17:59:04.331663 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:59:15 crc kubenswrapper[4877]: I0128 17:59:15.331998 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"
Jan 28 17:59:15 crc kubenswrapper[4877]: E0128 17:59:15.333470 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:59:15 crc kubenswrapper[4877]: I0128 17:59:15.729559 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Jan 28 17:59:18 crc kubenswrapper[4877]: I0128 17:59:18.010893 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrw7c" event={"ID":"67525667-217c-4c1c-b182-6dffcc277ff6","Type":"ContainerStarted","Data":"be92a4b8eae55a0aa76a27c9e565dc98ec80783b1c822cef6bea7cbbdc7f1c10"}
Jan 28 17:59:18 crc kubenswrapper[4877]: I0128 17:59:18.102824 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-nrw7c" podStartSLOduration=45.027020125 podStartE2EDuration="1m10.10280146s" podCreationTimestamp="2026-01-28 17:58:08 +0000 UTC" firstStartedPulling="2026-01-28 17:58:50.651652502 +0000 UTC m=+5034.209979400" lastFinishedPulling="2026-01-28 17:59:15.727433847 +0000 UTC m=+5059.285760735" observedRunningTime="2026-01-28 17:59:18.100910209 +0000 UTC m=+5061.659237107" watchObservedRunningTime="2026-01-28 17:59:18.10280146 +0000 UTC m=+5061.661128348"
Jan 28 17:59:19 crc kubenswrapper[4877]: I0128 17:59:19.024575 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 17:59:19 crc kubenswrapper[4877]: I0128 17:59:19.024964 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 17:59:20 crc kubenswrapper[4877]: I0128 17:59:20.133773 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-nrw7c" podUID="67525667-217c-4c1c-b182-6dffcc277ff6" containerName="registry-server" probeResult="failure" output=<
Jan 28 17:59:20 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 17:59:20 crc kubenswrapper[4877]: >
Jan 28 17:59:22 crc kubenswrapper[4877]: I0128 17:59:22.070522 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"979f7960-d78e-4f7d-b68b-757e70ac5378","Type":"ContainerStarted","Data":"d92475227e59e8acf98f8bbb95d2a57b0ce2be563d3c609e4d476d456393fdc5"}
Jan 28 17:59:22 crc kubenswrapper[4877]: I0128 17:59:22.092021 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=8.947428725 podStartE2EDuration="1m37.091997417s" podCreationTimestamp="2026-01-28 17:57:45 +0000 UTC" firstStartedPulling="2026-01-28 17:57:47.58192792 +0000 UTC m=+4971.140254818" lastFinishedPulling="2026-01-28 17:59:15.726496622 +0000 UTC m=+5059.284823510" observedRunningTime="2026-01-28 17:59:22.085860101 +0000 UTC m=+5065.644186989" watchObservedRunningTime="2026-01-28 17:59:22.091997417 +0000 UTC m=+5065.650324325"
Jan 28 17:59:24 crc kubenswrapper[4877]: I0128 17:59:24.094539 4877 generic.go:334] "Generic (PLEG): container finished" podID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerID="86b58765adba59cec0c76c3b343cf4dec07956b04467203cedbce9c55e07a440" exitCode=0
Jan 28 17:59:24 crc kubenswrapper[4877]: I0128 17:59:24.095047 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lmsk" event={"ID":"62733c9a-12dc-4bfc-88c7-06b6cec26fc3","Type":"ContainerDied","Data":"86b58765adba59cec0c76c3b343cf4dec07956b04467203cedbce9c55e07a440"}
Jan 28 17:59:26 crc kubenswrapper[4877]: I0128 17:59:26.140384 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lmsk" event={"ID":"62733c9a-12dc-4bfc-88c7-06b6cec26fc3","Type":"ContainerStarted","Data":"24f13f5541ec41b3d46499d7abef67bd3697d72343bff75dad48f6a125abd07d"}
Jan 28 17:59:26 crc kubenswrapper[4877]: I0128 17:59:26.164441 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9lmsk" podStartSLOduration=40.961535317 podStartE2EDuration="1m15.164421364s" podCreationTimestamp="2026-01-28 17:58:11 +0000 UTC" firstStartedPulling="2026-01-28 17:58:50.645381402 +0000 UTC m=+5034.203708300" lastFinishedPulling="2026-01-28 17:59:24.848267459 +0000 UTC m=+5068.406594347" observedRunningTime="2026-01-28 17:59:26.158818482 +0000 UTC m=+5069.717145400" watchObservedRunningTime="2026-01-28 17:59:26.164421364 +0000 UTC m=+5069.722748252"
Jan 28 17:59:27 crc kubenswrapper[4877]: I0128 17:59:27.339301 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"
Jan 28 17:59:27 crc kubenswrapper[4877]: E0128 17:59:27.340051 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:59:30 crc kubenswrapper[4877]: I0128 17:59:30.069291 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-nrw7c" podUID="67525667-217c-4c1c-b182-6dffcc277ff6" containerName="registry-server" probeResult="failure" output=<
Jan 28 17:59:30 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 17:59:30 crc kubenswrapper[4877]: >
Jan 28 17:59:31 crc kubenswrapper[4877]: I0128 17:59:31.900812 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 17:59:31 crc kubenswrapper[4877]: I0128 17:59:31.901171 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 17:59:32 crc kubenswrapper[4877]: I0128 17:59:32.948079 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 17:59:32 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 17:59:32 crc kubenswrapper[4877]: >
Jan 28 17:59:40 crc kubenswrapper[4877]: I0128 17:59:40.079875 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-nrw7c" podUID="67525667-217c-4c1c-b182-6dffcc277ff6" containerName="registry-server" probeResult="failure" output=<
Jan 28 17:59:40 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 17:59:40 crc kubenswrapper[4877]: >
Jan 28 17:59:41 crc kubenswrapper[4877]: I0128 17:59:41.333400 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"
Jan 28 17:59:41 crc kubenswrapper[4877]: E0128 17:59:41.334114 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:59:42 crc kubenswrapper[4877]: I0128 17:59:42.959862 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 17:59:42 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 17:59:42 crc kubenswrapper[4877]: >
Jan 28 17:59:50 crc kubenswrapper[4877]: I0128 17:59:50.807445 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-nrw7c" podUID="67525667-217c-4c1c-b182-6dffcc277ff6" containerName="registry-server" probeResult="failure" output=<
Jan 28 17:59:50 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 17:59:50 crc kubenswrapper[4877]: >
Jan 28 17:59:52 crc kubenswrapper[4877]: I0128 17:59:52.950579 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 17:59:52 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 17:59:52 crc kubenswrapper[4877]: >
Jan 28 17:59:54 crc kubenswrapper[4877]: I0128 17:59:54.330538 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"
Jan 28 17:59:54 crc kubenswrapper[4877]: E0128 17:59:54.331180 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 17:59:59 crc kubenswrapper[4877]: I0128 17:59:59.084878 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 17:59:59 crc kubenswrapper[4877]: I0128 17:59:59.143555 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 17:59:59 crc kubenswrapper[4877]: I0128 17:59:59.348282 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nrw7c"]
Jan 28 18:00:00 crc kubenswrapper[4877]: I0128 18:00:00.174121 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq"]
Jan 28 18:00:00 crc kubenswrapper[4877]: I0128 18:00:00.176260 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq"
Jan 28 18:00:00 crc kubenswrapper[4877]: I0128 18:00:00.178552 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 28 18:00:00 crc kubenswrapper[4877]: I0128 18:00:00.178817 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 28 18:00:00 crc kubenswrapper[4877]: I0128 18:00:00.201231 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq"]
Jan 28 18:00:00 crc kubenswrapper[4877]: I0128 18:00:00.343812 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4bc3d006-7d31-436b-bf95-a1441a3d448c-secret-volume\") pod \"collect-profiles-29493720-n2pjq\" (UID: \"4bc3d006-7d31-436b-bf95-a1441a3d448c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq"
Jan 28 18:00:00 crc kubenswrapper[4877]: I0128 18:00:00.345042 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4bc3d006-7d31-436b-bf95-a1441a3d448c-config-volume\") pod \"collect-profiles-29493720-n2pjq\" (UID: \"4bc3d006-7d31-436b-bf95-a1441a3d448c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq"
Jan 28 18:00:00 crc kubenswrapper[4877]: I0128 18:00:00.345192 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxx2f\" (UniqueName: \"kubernetes.io/projected/4bc3d006-7d31-436b-bf95-a1441a3d448c-kube-api-access-rxx2f\") pod \"collect-profiles-29493720-n2pjq\" (UID: \"4bc3d006-7d31-436b-bf95-a1441a3d448c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq"
Jan 28 18:00:00 crc kubenswrapper[4877]: I0128 18:00:00.448030 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4bc3d006-7d31-436b-bf95-a1441a3d448c-config-volume\") pod \"collect-profiles-29493720-n2pjq\" (UID: \"4bc3d006-7d31-436b-bf95-a1441a3d448c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq"
Jan 28 18:00:00 crc kubenswrapper[4877]: I0128 18:00:00.448317 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxx2f\" (UniqueName: \"kubernetes.io/projected/4bc3d006-7d31-436b-bf95-a1441a3d448c-kube-api-access-rxx2f\") pod \"collect-profiles-29493720-n2pjq\" (UID: \"4bc3d006-7d31-436b-bf95-a1441a3d448c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq"
Jan 28 18:00:00 crc kubenswrapper[4877]: I0128 18:00:00.448513 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4bc3d006-7d31-436b-bf95-a1441a3d448c-secret-volume\") pod \"collect-profiles-29493720-n2pjq\" (UID: \"4bc3d006-7d31-436b-bf95-a1441a3d448c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq"
Jan 28 18:00:00 crc kubenswrapper[4877]: I0128 18:00:00.449703 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4bc3d006-7d31-436b-bf95-a1441a3d448c-config-volume\") pod \"collect-profiles-29493720-n2pjq\" (UID: \"4bc3d006-7d31-436b-bf95-a1441a3d448c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq"
Jan 28 18:00:00 crc kubenswrapper[4877]: I0128 18:00:00.459123 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4bc3d006-7d31-436b-bf95-a1441a3d448c-secret-volume\") pod \"collect-profiles-29493720-n2pjq\" (UID: \"4bc3d006-7d31-436b-bf95-a1441a3d448c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq"
Jan 28 18:00:00 crc kubenswrapper[4877]: I0128 18:00:00.467066 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxx2f\" (UniqueName: \"kubernetes.io/projected/4bc3d006-7d31-436b-bf95-a1441a3d448c-kube-api-access-rxx2f\") pod \"collect-profiles-29493720-n2pjq\" (UID: \"4bc3d006-7d31-436b-bf95-a1441a3d448c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq"
Jan 28 18:00:00 crc kubenswrapper[4877]: I0128 18:00:00.500669 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq"
Jan 28 18:00:00 crc kubenswrapper[4877]: I0128 18:00:00.532088 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-nrw7c" podUID="67525667-217c-4c1c-b182-6dffcc277ff6" containerName="registry-server" containerID="cri-o://be92a4b8eae55a0aa76a27c9e565dc98ec80783b1c822cef6bea7cbbdc7f1c10" gracePeriod=2
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.041973 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq"]
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.160378 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.271419 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jglh7\" (UniqueName: \"kubernetes.io/projected/67525667-217c-4c1c-b182-6dffcc277ff6-kube-api-access-jglh7\") pod \"67525667-217c-4c1c-b182-6dffcc277ff6\" (UID: \"67525667-217c-4c1c-b182-6dffcc277ff6\") "
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.271755 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67525667-217c-4c1c-b182-6dffcc277ff6-catalog-content\") pod \"67525667-217c-4c1c-b182-6dffcc277ff6\" (UID: \"67525667-217c-4c1c-b182-6dffcc277ff6\") "
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.271824 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67525667-217c-4c1c-b182-6dffcc277ff6-utilities\") pod \"67525667-217c-4c1c-b182-6dffcc277ff6\" (UID: \"67525667-217c-4c1c-b182-6dffcc277ff6\") "
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.272334 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67525667-217c-4c1c-b182-6dffcc277ff6-utilities" (OuterVolumeSpecName: "utilities") pod "67525667-217c-4c1c-b182-6dffcc277ff6" (UID: "67525667-217c-4c1c-b182-6dffcc277ff6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.272664 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67525667-217c-4c1c-b182-6dffcc277ff6-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.284750 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67525667-217c-4c1c-b182-6dffcc277ff6-kube-api-access-jglh7" (OuterVolumeSpecName: "kube-api-access-jglh7") pod "67525667-217c-4c1c-b182-6dffcc277ff6" (UID: "67525667-217c-4c1c-b182-6dffcc277ff6"). InnerVolumeSpecName "kube-api-access-jglh7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.325141 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67525667-217c-4c1c-b182-6dffcc277ff6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "67525667-217c-4c1c-b182-6dffcc277ff6" (UID: "67525667-217c-4c1c-b182-6dffcc277ff6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.375421 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jglh7\" (UniqueName: \"kubernetes.io/projected/67525667-217c-4c1c-b182-6dffcc277ff6-kube-api-access-jglh7\") on node \"crc\" DevicePath \"\""
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.375461 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67525667-217c-4c1c-b182-6dffcc277ff6-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.542096 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq" event={"ID":"4bc3d006-7d31-436b-bf95-a1441a3d448c","Type":"ContainerStarted","Data":"c773f5a59026e73e5ff0fabc8ef046f4f810981d575e4c71b2c6978fe03b6620"}
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.544317 4877 generic.go:334] "Generic (PLEG): container finished" podID="67525667-217c-4c1c-b182-6dffcc277ff6" containerID="be92a4b8eae55a0aa76a27c9e565dc98ec80783b1c822cef6bea7cbbdc7f1c10" exitCode=0
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.544373 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nrw7c"
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.544364 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrw7c" event={"ID":"67525667-217c-4c1c-b182-6dffcc277ff6","Type":"ContainerDied","Data":"be92a4b8eae55a0aa76a27c9e565dc98ec80783b1c822cef6bea7cbbdc7f1c10"}
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.544516 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrw7c" event={"ID":"67525667-217c-4c1c-b182-6dffcc277ff6","Type":"ContainerDied","Data":"dc98338be3a969725ca53117a46c6917637caf3759b0bd989cbf7b4e8779ba5c"}
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.544539 4877 scope.go:117] "RemoveContainer" containerID="be92a4b8eae55a0aa76a27c9e565dc98ec80783b1c822cef6bea7cbbdc7f1c10"
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.577470 4877 scope.go:117] "RemoveContainer" containerID="ded15300946f99a7205ec463663e1b4787a9d0e3c94a80d48e732f543af00b0a"
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.577469 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nrw7c"]
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.591918 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-nrw7c"]
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.601178 4877 scope.go:117] "RemoveContainer" containerID="ee7949b510ce372591874198db8742684fb18020ab7a7ad6f72351fb345ca8c5"
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.661510 4877 scope.go:117] "RemoveContainer" containerID="be92a4b8eae55a0aa76a27c9e565dc98ec80783b1c822cef6bea7cbbdc7f1c10"
Jan 28 18:00:01 crc kubenswrapper[4877]: E0128 18:00:01.661994 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be92a4b8eae55a0aa76a27c9e565dc98ec80783b1c822cef6bea7cbbdc7f1c10\": container with ID starting with be92a4b8eae55a0aa76a27c9e565dc98ec80783b1c822cef6bea7cbbdc7f1c10 not found: ID does not exist" containerID="be92a4b8eae55a0aa76a27c9e565dc98ec80783b1c822cef6bea7cbbdc7f1c10"
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.662037 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be92a4b8eae55a0aa76a27c9e565dc98ec80783b1c822cef6bea7cbbdc7f1c10"} err="failed to get container status \"be92a4b8eae55a0aa76a27c9e565dc98ec80783b1c822cef6bea7cbbdc7f1c10\": rpc error: code = NotFound desc = could not find container \"be92a4b8eae55a0aa76a27c9e565dc98ec80783b1c822cef6bea7cbbdc7f1c10\": container with ID starting with be92a4b8eae55a0aa76a27c9e565dc98ec80783b1c822cef6bea7cbbdc7f1c10 not found: ID does not exist"
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.662065 4877 scope.go:117] "RemoveContainer" containerID="ded15300946f99a7205ec463663e1b4787a9d0e3c94a80d48e732f543af00b0a"
Jan 28 18:00:01 crc kubenswrapper[4877]: E0128 18:00:01.662444 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ded15300946f99a7205ec463663e1b4787a9d0e3c94a80d48e732f543af00b0a\": container with ID starting with ded15300946f99a7205ec463663e1b4787a9d0e3c94a80d48e732f543af00b0a not found: ID does not exist" containerID="ded15300946f99a7205ec463663e1b4787a9d0e3c94a80d48e732f543af00b0a"
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.662493 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ded15300946f99a7205ec463663e1b4787a9d0e3c94a80d48e732f543af00b0a"} err="failed to get container status \"ded15300946f99a7205ec463663e1b4787a9d0e3c94a80d48e732f543af00b0a\": rpc error: code = NotFound desc = could not find container \"ded15300946f99a7205ec463663e1b4787a9d0e3c94a80d48e732f543af00b0a\": container with ID starting with ded15300946f99a7205ec463663e1b4787a9d0e3c94a80d48e732f543af00b0a not found: ID does not exist"
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.662517 4877 scope.go:117] "RemoveContainer" containerID="ee7949b510ce372591874198db8742684fb18020ab7a7ad6f72351fb345ca8c5"
Jan 28 18:00:01 crc kubenswrapper[4877]: E0128 18:00:01.662825 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee7949b510ce372591874198db8742684fb18020ab7a7ad6f72351fb345ca8c5\": container with ID starting with ee7949b510ce372591874198db8742684fb18020ab7a7ad6f72351fb345ca8c5 not found: ID does not exist" containerID="ee7949b510ce372591874198db8742684fb18020ab7a7ad6f72351fb345ca8c5"
Jan 28 18:00:01 crc kubenswrapper[4877]: I0128 18:00:01.662863 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee7949b510ce372591874198db8742684fb18020ab7a7ad6f72351fb345ca8c5"} err="failed to get container status \"ee7949b510ce372591874198db8742684fb18020ab7a7ad6f72351fb345ca8c5\": rpc error: code = NotFound desc = could not find container \"ee7949b510ce372591874198db8742684fb18020ab7a7ad6f72351fb345ca8c5\": container with ID starting with ee7949b510ce372591874198db8742684fb18020ab7a7ad6f72351fb345ca8c5 not found: ID does not exist"
Jan 28 18:00:02 crc kubenswrapper[4877]: I0128 18:00:02.565682 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq" event={"ID":"4bc3d006-7d31-436b-bf95-a1441a3d448c","Type":"ContainerStarted","Data":"6b5830566ed95d1c90df58292cc5e84a8044302168e0f0973d78fefdd5056781"}
Jan 28 18:00:02 crc kubenswrapper[4877]: I0128 18:00:02.592154 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq" podStartSLOduration=2.592130842 podStartE2EDuration="2.592130842s" podCreationTimestamp="2026-01-28 18:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:00:02.585667117 +0000 UTC m=+5106.143994005" watchObservedRunningTime="2026-01-28 18:00:02.592130842 +0000 UTC m=+5106.150457740"
Jan 28 18:00:03 crc kubenswrapper[4877]: I0128 18:00:03.351303 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67525667-217c-4c1c-b182-6dffcc277ff6" path="/var/lib/kubelet/pods/67525667-217c-4c1c-b182-6dffcc277ff6/volumes"
Jan 28 18:00:03 crc kubenswrapper[4877]: I0128 18:00:03.588741 4877 generic.go:334] "Generic (PLEG): container finished" podID="4bc3d006-7d31-436b-bf95-a1441a3d448c" containerID="6b5830566ed95d1c90df58292cc5e84a8044302168e0f0973d78fefdd5056781" exitCode=0
Jan 28 18:00:03 crc kubenswrapper[4877]: I0128 18:00:03.588977 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq" event={"ID":"4bc3d006-7d31-436b-bf95-a1441a3d448c","Type":"ContainerDied","Data":"6b5830566ed95d1c90df58292cc5e84a8044302168e0f0973d78fefdd5056781"}
Jan 28 18:00:03 crc kubenswrapper[4877]: I0128 18:00:03.618946 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:00:03 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:00:03 crc kubenswrapper[4877]: >
Jan 28 18:00:05 crc kubenswrapper[4877]: I0128 18:00:05.513242 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq"
Jan 28 18:00:05 crc kubenswrapper[4877]: I0128 18:00:05.593785 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxx2f\" (UniqueName: \"kubernetes.io/projected/4bc3d006-7d31-436b-bf95-a1441a3d448c-kube-api-access-rxx2f\") pod \"4bc3d006-7d31-436b-bf95-a1441a3d448c\" (UID: \"4bc3d006-7d31-436b-bf95-a1441a3d448c\") "
Jan 28 18:00:05 crc kubenswrapper[4877]: I0128 18:00:05.594030 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4bc3d006-7d31-436b-bf95-a1441a3d448c-config-volume\") pod \"4bc3d006-7d31-436b-bf95-a1441a3d448c\" (UID: \"4bc3d006-7d31-436b-bf95-a1441a3d448c\") "
Jan 28 18:00:05 crc kubenswrapper[4877]: I0128 18:00:05.594073 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4bc3d006-7d31-436b-bf95-a1441a3d448c-secret-volume\") pod \"4bc3d006-7d31-436b-bf95-a1441a3d448c\" (UID: \"4bc3d006-7d31-436b-bf95-a1441a3d448c\") "
Jan 28 18:00:05 crc kubenswrapper[4877]: I0128 18:00:05.595160 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bc3d006-7d31-436b-bf95-a1441a3d448c-config-volume" (OuterVolumeSpecName: "config-volume") pod "4bc3d006-7d31-436b-bf95-a1441a3d448c" (UID: "4bc3d006-7d31-436b-bf95-a1441a3d448c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:00:05 crc kubenswrapper[4877]: I0128 18:00:05.603265 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bc3d006-7d31-436b-bf95-a1441a3d448c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4bc3d006-7d31-436b-bf95-a1441a3d448c" (UID: "4bc3d006-7d31-436b-bf95-a1441a3d448c"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:00:05 crc kubenswrapper[4877]: I0128 18:00:05.606808 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bc3d006-7d31-436b-bf95-a1441a3d448c-kube-api-access-rxx2f" (OuterVolumeSpecName: "kube-api-access-rxx2f") pod "4bc3d006-7d31-436b-bf95-a1441a3d448c" (UID: "4bc3d006-7d31-436b-bf95-a1441a3d448c"). InnerVolumeSpecName "kube-api-access-rxx2f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:00:05 crc kubenswrapper[4877]: I0128 18:00:05.632901 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq" event={"ID":"4bc3d006-7d31-436b-bf95-a1441a3d448c","Type":"ContainerDied","Data":"c773f5a59026e73e5ff0fabc8ef046f4f810981d575e4c71b2c6978fe03b6620"}
Jan 28 18:00:05 crc kubenswrapper[4877]: I0128 18:00:05.632947 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c773f5a59026e73e5ff0fabc8ef046f4f810981d575e4c71b2c6978fe03b6620"
Jan 28 18:00:05 crc kubenswrapper[4877]: I0128 18:00:05.632996 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493720-n2pjq"
Jan 28 18:00:05 crc kubenswrapper[4877]: I0128 18:00:05.694551 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs"]
Jan 28 18:00:05 crc kubenswrapper[4877]: I0128 18:00:05.699008 4877 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4bc3d006-7d31-436b-bf95-a1441a3d448c-config-volume\") on node \"crc\" DevicePath \"\""
Jan 28 18:00:05 crc kubenswrapper[4877]: I0128 18:00:05.699054 4877 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4bc3d006-7d31-436b-bf95-a1441a3d448c-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 28 18:00:05 crc kubenswrapper[4877]: I0128 18:00:05.699071 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxx2f\" (UniqueName: \"kubernetes.io/projected/4bc3d006-7d31-436b-bf95-a1441a3d448c-kube-api-access-rxx2f\") on node \"crc\" DevicePath \"\""
Jan 28 18:00:05 crc kubenswrapper[4877]: I0128 18:00:05.710355 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493675-fkxbs"]
Jan 28 18:00:07 crc kubenswrapper[4877]: I0128 18:00:07.343836 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"
Jan 28 18:00:07 crc kubenswrapper[4877]: E0128 18:00:07.344738 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 18:00:07 crc kubenswrapper[4877]: I0128 18:00:07.356284 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57b5ce29-2759-4dc4-b3a1-3477aa0c11a4" path="/var/lib/kubelet/pods/57b5ce29-2759-4dc4-b3a1-3477aa0c11a4/volumes"
Jan 28 18:00:12 crc kubenswrapper[4877]: I0128 18:00:12.947880 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:00:12 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:00:12 crc kubenswrapper[4877]: >
Jan 28 18:00:18 crc kubenswrapper[4877]: I0128 18:00:18.331217 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"
Jan 28 18:00:18 crc kubenswrapper[4877]: E0128 18:00:18.332253 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 18:00:22 crc kubenswrapper[4877]: I0128 18:00:22.983272 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:00:22 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:00:22 crc kubenswrapper[4877]: >
Jan 28 18:00:32 crc kubenswrapper[4877]: I0128 18:00:32.277049 4877 scope.go:117] "RemoveContainer" containerID="1e1d54838a5a193d176591ac2145c35e1e2d1897044d531b71a10dcb28dceb16"
Jan 28 18:00:32 crc kubenswrapper[4877]: I0128 18:00:32.331129 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"
Jan 28 18:00:32 crc kubenswrapper[4877]: E0128 18:00:32.331468 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 18:00:32 crc kubenswrapper[4877]: I0128 18:00:32.949641 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:00:32 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:00:32 crc kubenswrapper[4877]: >
Jan 28 18:00:42 crc kubenswrapper[4877]: I0128 18:00:42.961248 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:00:42 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:00:42 crc kubenswrapper[4877]: >
Jan 28 18:00:43 crc kubenswrapper[4877]: I0128 18:00:43.333736 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"
Jan 28 18:00:43 crc kubenswrapper[4877]: E0128 18:00:43.334073 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 18:00:52 crc kubenswrapper[4877]: I0128 18:00:52.957433 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:00:52 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:00:52 crc kubenswrapper[4877]: >
Jan 28 18:00:56 crc kubenswrapper[4877]: I0128 18:00:56.330509 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"
Jan 28 18:00:56 crc kubenswrapper[4877]: E0128 18:00:56.331350 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.342123 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29493721-mw58j"]
Jan 28 18:01:00 crc kubenswrapper[4877]: E0128 18:01:00.344658 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67525667-217c-4c1c-b182-6dffcc277ff6" containerName="registry-server"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.344691 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="67525667-217c-4c1c-b182-6dffcc277ff6" containerName="registry-server"
Jan 28 18:01:00 crc kubenswrapper[4877]: E0128 18:01:00.344720 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67525667-217c-4c1c-b182-6dffcc277ff6" containerName="extract-content"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.344729 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="67525667-217c-4c1c-b182-6dffcc277ff6" containerName="extract-content"
Jan 28 18:01:00 crc kubenswrapper[4877]: E0128 18:01:00.344774 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67525667-217c-4c1c-b182-6dffcc277ff6" containerName="extract-utilities"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.344782 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="67525667-217c-4c1c-b182-6dffcc277ff6" containerName="extract-utilities"
Jan 28 18:01:00 crc kubenswrapper[4877]: E0128 18:01:00.344796 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bc3d006-7d31-436b-bf95-a1441a3d448c" containerName="collect-profiles"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.344802 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bc3d006-7d31-436b-bf95-a1441a3d448c" containerName="collect-profiles"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.345061 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="4bc3d006-7d31-436b-bf95-a1441a3d448c" containerName="collect-profiles"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.345083 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="67525667-217c-4c1c-b182-6dffcc277ff6" containerName="registry-server"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.348057 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29493721-mw58j"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.378145 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-config-data\") pod \"keystone-cron-29493721-mw58j\" (UID: \"4b261872-0c7f-44ac-962b-a16fba14887b\") " pod="openstack/keystone-cron-29493721-mw58j"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.378223 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-fernet-keys\") pod \"keystone-cron-29493721-mw58j\" (UID: \"4b261872-0c7f-44ac-962b-a16fba14887b\") " pod="openstack/keystone-cron-29493721-mw58j"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.378289 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-combined-ca-bundle\") pod \"keystone-cron-29493721-mw58j\" (UID: \"4b261872-0c7f-44ac-962b-a16fba14887b\") " pod="openstack/keystone-cron-29493721-mw58j"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.378565 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2v79t\" (UniqueName: \"kubernetes.io/projected/4b261872-0c7f-44ac-962b-a16fba14887b-kube-api-access-2v79t\") pod \"keystone-cron-29493721-mw58j\" (UID: \"4b261872-0c7f-44ac-962b-a16fba14887b\") " pod="openstack/keystone-cron-29493721-mw58j"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.427934 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29493721-mw58j"]
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.488017 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-combined-ca-bundle\") pod \"keystone-cron-29493721-mw58j\" (UID: \"4b261872-0c7f-44ac-962b-a16fba14887b\") " pod="openstack/keystone-cron-29493721-mw58j"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.490830 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2v79t\" (UniqueName: \"kubernetes.io/projected/4b261872-0c7f-44ac-962b-a16fba14887b-kube-api-access-2v79t\") pod \"keystone-cron-29493721-mw58j\" (UID: \"4b261872-0c7f-44ac-962b-a16fba14887b\") " pod="openstack/keystone-cron-29493721-mw58j"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.491255 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-config-data\") pod \"keystone-cron-29493721-mw58j\" (UID: \"4b261872-0c7f-44ac-962b-a16fba14887b\") " pod="openstack/keystone-cron-29493721-mw58j"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.491325 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-fernet-keys\") pod \"keystone-cron-29493721-mw58j\" (UID: \"4b261872-0c7f-44ac-962b-a16fba14887b\") " pod="openstack/keystone-cron-29493721-mw58j"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.538246 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-combined-ca-bundle\") pod \"keystone-cron-29493721-mw58j\" (UID: \"4b261872-0c7f-44ac-962b-a16fba14887b\") " pod="openstack/keystone-cron-29493721-mw58j"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.540868 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-fernet-keys\") pod \"keystone-cron-29493721-mw58j\" (UID: \"4b261872-0c7f-44ac-962b-a16fba14887b\") " pod="openstack/keystone-cron-29493721-mw58j"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.541597 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-config-data\") pod \"keystone-cron-29493721-mw58j\" (UID: \"4b261872-0c7f-44ac-962b-a16fba14887b\") " pod="openstack/keystone-cron-29493721-mw58j"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.542991 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2v79t\" (UniqueName: \"kubernetes.io/projected/4b261872-0c7f-44ac-962b-a16fba14887b-kube-api-access-2v79t\") pod \"keystone-cron-29493721-mw58j\" (UID: \"4b261872-0c7f-44ac-962b-a16fba14887b\") " pod="openstack/keystone-cron-29493721-mw58j"
Jan 28 18:01:00 crc kubenswrapper[4877]: I0128 18:01:00.675340 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29493721-mw58j"
Jan 28 18:01:02 crc kubenswrapper[4877]: I0128 18:01:02.954250 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:01:02 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:01:02 crc kubenswrapper[4877]: >
Jan 28 18:01:02 crc kubenswrapper[4877]: I0128 18:01:02.959154 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 18:01:02 crc kubenswrapper[4877]: I0128 18:01:02.960792 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"24f13f5541ec41b3d46499d7abef67bd3697d72343bff75dad48f6a125abd07d"} pod="openshift-marketplace/redhat-operators-9lmsk" containerMessage="Container registry-server failed startup probe, will be restarted"
Jan 28 18:01:02 crc kubenswrapper[4877]: I0128 18:01:02.961401 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" containerID="cri-o://24f13f5541ec41b3d46499d7abef67bd3697d72343bff75dad48f6a125abd07d" gracePeriod=30
Jan 28 18:01:03 crc kubenswrapper[4877]: I0128 18:01:03.103092 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29493721-mw58j"]
Jan 28 18:01:03 crc kubenswrapper[4877]: I0128 18:01:03.289927 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29493721-mw58j" event={"ID":"4b261872-0c7f-44ac-962b-a16fba14887b","Type":"ContainerStarted","Data":"5e34218c11c92a83f0f1efdb7d88384b426519be9c802c5f4efd48e17f63f8c2"}
Jan 28 18:01:05 crc kubenswrapper[4877]: I0128 18:01:05.323994 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod"
pod="openstack/keystone-cron-29493721-mw58j" event={"ID":"4b261872-0c7f-44ac-962b-a16fba14887b","Type":"ContainerStarted","Data":"489ba60bca1083badd6b03079bc3e178392fe800b28184511b626c0d1d73bf38"} Jan 28 18:01:05 crc kubenswrapper[4877]: I0128 18:01:05.376548 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29493721-mw58j" podStartSLOduration=5.372415396 podStartE2EDuration="5.372415396s" podCreationTimestamp="2026-01-28 18:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:01:05.36663349 +0000 UTC m=+5168.924960398" watchObservedRunningTime="2026-01-28 18:01:05.372415396 +0000 UTC m=+5168.930742284" Jan 28 18:01:08 crc kubenswrapper[4877]: I0128 18:01:08.330602 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a" Jan 28 18:01:08 crc kubenswrapper[4877]: E0128 18:01:08.333425 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:01:17 crc kubenswrapper[4877]: I0128 18:01:17.476108 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29493721-mw58j" event={"ID":"4b261872-0c7f-44ac-962b-a16fba14887b","Type":"ContainerDied","Data":"489ba60bca1083badd6b03079bc3e178392fe800b28184511b626c0d1d73bf38"} Jan 28 18:01:17 crc kubenswrapper[4877]: I0128 18:01:17.498754 4877 generic.go:334] "Generic (PLEG): container finished" podID="4b261872-0c7f-44ac-962b-a16fba14887b" containerID="489ba60bca1083badd6b03079bc3e178392fe800b28184511b626c0d1d73bf38" exitCode=0 Jan 28 18:01:22 crc kubenswrapper[4877]: I0128 18:01:22.122205 4877 trace.go:236] Trace[1925036451]: "Calculate volume metrics of catalog-content for pod openshift-marketplace/certified-operators-8q65r" (28-Jan-2026 18:01:20.264) (total time: 1852ms): Jan 28 18:01:22 crc kubenswrapper[4877]: Trace[1925036451]: [1.852710761s] [1.852710761s] END Jan 28 18:01:22 crc kubenswrapper[4877]: I0128 18:01:22.331442 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a" Jan 28 18:01:22 crc kubenswrapper[4877]: E0128 18:01:22.332171 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:01:23 crc kubenswrapper[4877]: I0128 18:01:23.081247 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29493721-mw58j" Jan 28 18:01:23 crc kubenswrapper[4877]: I0128 18:01:23.186505 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-combined-ca-bundle\") pod \"4b261872-0c7f-44ac-962b-a16fba14887b\" (UID: \"4b261872-0c7f-44ac-962b-a16fba14887b\") " Jan 28 18:01:23 crc kubenswrapper[4877]: I0128 18:01:23.186697 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-fernet-keys\") pod \"4b261872-0c7f-44ac-962b-a16fba14887b\" (UID: \"4b261872-0c7f-44ac-962b-a16fba14887b\") " Jan 28 18:01:23 crc kubenswrapper[4877]: I0128 18:01:23.186820 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-config-data\") pod \"4b261872-0c7f-44ac-962b-a16fba14887b\" (UID: \"4b261872-0c7f-44ac-962b-a16fba14887b\") " Jan 28 18:01:23 crc kubenswrapper[4877]: I0128 18:01:23.187152 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2v79t\" (UniqueName: \"kubernetes.io/projected/4b261872-0c7f-44ac-962b-a16fba14887b-kube-api-access-2v79t\") pod \"4b261872-0c7f-44ac-962b-a16fba14887b\" (UID: \"4b261872-0c7f-44ac-962b-a16fba14887b\") " Jan 28 18:01:23 crc kubenswrapper[4877]: I0128 18:01:23.564571 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29493721-mw58j" event={"ID":"4b261872-0c7f-44ac-962b-a16fba14887b","Type":"ContainerDied","Data":"5e34218c11c92a83f0f1efdb7d88384b426519be9c802c5f4efd48e17f63f8c2"} Jan 28 18:01:23 crc kubenswrapper[4877]: I0128 18:01:23.567831 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5e34218c11c92a83f0f1efdb7d88384b426519be9c802c5f4efd48e17f63f8c2" Jan 28 18:01:23 crc kubenswrapper[4877]: I0128 18:01:23.571693 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29493721-mw58j" Jan 28 18:01:24 crc kubenswrapper[4877]: I0128 18:01:24.098911 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "4b261872-0c7f-44ac-962b-a16fba14887b" (UID: "4b261872-0c7f-44ac-962b-a16fba14887b"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:01:24 crc kubenswrapper[4877]: I0128 18:01:24.110325 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b261872-0c7f-44ac-962b-a16fba14887b-kube-api-access-2v79t" (OuterVolumeSpecName: "kube-api-access-2v79t") pod "4b261872-0c7f-44ac-962b-a16fba14887b" (UID: "4b261872-0c7f-44ac-962b-a16fba14887b"). InnerVolumeSpecName "kube-api-access-2v79t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:01:24 crc kubenswrapper[4877]: I0128 18:01:24.114303 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4b261872-0c7f-44ac-962b-a16fba14887b" (UID: "4b261872-0c7f-44ac-962b-a16fba14887b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:01:24 crc kubenswrapper[4877]: I0128 18:01:24.127395 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2v79t\" (UniqueName: \"kubernetes.io/projected/4b261872-0c7f-44ac-962b-a16fba14887b-kube-api-access-2v79t\") on node \"crc\" DevicePath \"\"" Jan 28 18:01:24 crc kubenswrapper[4877]: I0128 18:01:24.127431 4877 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:01:24 crc kubenswrapper[4877]: I0128 18:01:24.127440 4877 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 28 18:01:24 crc kubenswrapper[4877]: I0128 18:01:24.278799 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-config-data" (OuterVolumeSpecName: "config-data") pod "4b261872-0c7f-44ac-962b-a16fba14887b" (UID: "4b261872-0c7f-44ac-962b-a16fba14887b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:01:24 crc kubenswrapper[4877]: I0128 18:01:24.333110 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b261872-0c7f-44ac-962b-a16fba14887b-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:01:24 crc kubenswrapper[4877]: I0128 18:01:24.597294 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-qn628" podUID="e25ef10a-92ae-45b2-9467-7f15b523a8a1" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:01:34 crc kubenswrapper[4877]: I0128 18:01:34.059150 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9lmsk_62733c9a-12dc-4bfc-88c7-06b6cec26fc3/registry-server/0.log" Jan 28 18:01:34 crc kubenswrapper[4877]: I0128 18:01:34.065756 4877 generic.go:334] "Generic (PLEG): container finished" podID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerID="24f13f5541ec41b3d46499d7abef67bd3697d72343bff75dad48f6a125abd07d" exitCode=137 Jan 28 18:01:34 crc kubenswrapper[4877]: I0128 18:01:34.065798 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lmsk" event={"ID":"62733c9a-12dc-4bfc-88c7-06b6cec26fc3","Type":"ContainerDied","Data":"24f13f5541ec41b3d46499d7abef67bd3697d72343bff75dad48f6a125abd07d"} Jan 28 18:01:35 crc kubenswrapper[4877]: I0128 18:01:35.410834 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a" Jan 28 18:01:35 crc kubenswrapper[4877]: E0128 18:01:35.439196 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:01:42 crc kubenswrapper[4877]: I0128 18:01:42.157661 4877 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-9lmsk_62733c9a-12dc-4bfc-88c7-06b6cec26fc3/registry-server/0.log" Jan 28 18:01:42 crc kubenswrapper[4877]: I0128 18:01:42.160228 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lmsk" event={"ID":"62733c9a-12dc-4bfc-88c7-06b6cec26fc3","Type":"ContainerStarted","Data":"1d6b80ce5f7fb85778eeff2a24ed478861089cbb66c8347538a0400dc8d2a031"} Jan 28 18:01:50 crc kubenswrapper[4877]: I0128 18:01:50.344397 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a" Jan 28 18:01:50 crc kubenswrapper[4877]: E0128 18:01:50.345585 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:01:51 crc kubenswrapper[4877]: I0128 18:01:51.901431 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9lmsk" Jan 28 18:01:51 crc kubenswrapper[4877]: I0128 18:01:51.902052 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9lmsk" Jan 28 18:01:53 crc kubenswrapper[4877]: I0128 18:01:53.198246 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=< Jan 28 18:01:53 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:01:53 crc kubenswrapper[4877]: > Jan 28 18:02:02 crc kubenswrapper[4877]: I0128 18:02:02.946870 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=< Jan 28 18:02:02 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:02:02 crc kubenswrapper[4877]: > Jan 28 18:02:03 crc kubenswrapper[4877]: I0128 18:02:03.341873 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a" Jan 28 18:02:03 crc kubenswrapper[4877]: E0128 18:02:03.351601 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:02:12 crc kubenswrapper[4877]: I0128 18:02:12.959958 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=< Jan 28 18:02:12 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:02:12 crc kubenswrapper[4877]: > Jan 28 18:02:17 crc kubenswrapper[4877]: I0128 18:02:17.351665 4877 scope.go:117] "RemoveContainer" 
containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a" Jan 28 18:02:17 crc kubenswrapper[4877]: E0128 18:02:17.353071 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:02:22 crc kubenswrapper[4877]: I0128 18:02:22.957828 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=< Jan 28 18:02:22 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:02:22 crc kubenswrapper[4877]: > Jan 28 18:02:31 crc kubenswrapper[4877]: I0128 18:02:31.346086 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a" Jan 28 18:02:31 crc kubenswrapper[4877]: E0128 18:02:31.347889 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:02:33 crc kubenswrapper[4877]: I0128 18:02:33.009765 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=< Jan 28 18:02:33 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:02:33 crc kubenswrapper[4877]: > Jan 28 18:02:42 crc kubenswrapper[4877]: I0128 18:02:42.956600 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=< Jan 28 18:02:42 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:02:42 crc kubenswrapper[4877]: > Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.426367 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a" Jan 28 18:02:44 crc kubenswrapper[4877]: E0128 18:02:44.454223 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.487425 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-msptv"] Jan 28 18:02:44 crc kubenswrapper[4877]: E0128 18:02:44.557494 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b261872-0c7f-44ac-962b-a16fba14887b" containerName="keystone-cron" Jan 28 18:02:44 
Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.557797 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b261872-0c7f-44ac-962b-a16fba14887b" containerName="keystone-cron"
Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.569163 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b261872-0c7f-44ac-962b-a16fba14887b" containerName="keystone-cron"
Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.582555 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vs7bc"]
Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.584068 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-msptv"
Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.588347 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vs7bc"
Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.648322 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-qn628" podUID="e25ef10a-92ae-45b2-9467-7f15b523a8a1" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.709706 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88ff251b-c893-4ea0-baa4-7d69abc30ac1-utilities\") pod \"redhat-marketplace-msptv\" (UID: \"88ff251b-c893-4ea0-baa4-7d69abc30ac1\") " pod="openshift-marketplace/redhat-marketplace-msptv"
Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.709995 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-utilities\") pod \"certified-operators-vs7bc\" (UID: \"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9\") " pod="openshift-marketplace/certified-operators-vs7bc"
Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.717920 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-catalog-content\") pod \"certified-operators-vs7bc\" (UID: \"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9\") " pod="openshift-marketplace/certified-operators-vs7bc"
Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.718052 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zx9lc\" (UniqueName: \"kubernetes.io/projected/88ff251b-c893-4ea0-baa4-7d69abc30ac1-kube-api-access-zx9lc\") pod \"redhat-marketplace-msptv\" (UID: \"88ff251b-c893-4ea0-baa4-7d69abc30ac1\") " pod="openshift-marketplace/redhat-marketplace-msptv"
Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.718286 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xh2mm\" (UniqueName: \"kubernetes.io/projected/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-kube-api-access-xh2mm\") pod \"certified-operators-vs7bc\" (UID: \"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9\") " pod="openshift-marketplace/certified-operators-vs7bc"
Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.718523 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88ff251b-c893-4ea0-baa4-7d69abc30ac1-catalog-content\") pod \"redhat-marketplace-msptv\" (UID: \"88ff251b-c893-4ea0-baa4-7d69abc30ac1\") " pod="openshift-marketplace/redhat-marketplace-msptv"
(UniqueName: \"kubernetes.io/empty-dir/88ff251b-c893-4ea0-baa4-7d69abc30ac1-catalog-content\") pod \"redhat-marketplace-msptv\" (UID: \"88ff251b-c893-4ea0-baa4-7d69abc30ac1\") " pod="openshift-marketplace/redhat-marketplace-msptv" Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.728092 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-msptv"] Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.746266 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vs7bc"] Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.821853 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88ff251b-c893-4ea0-baa4-7d69abc30ac1-catalog-content\") pod \"redhat-marketplace-msptv\" (UID: \"88ff251b-c893-4ea0-baa4-7d69abc30ac1\") " pod="openshift-marketplace/redhat-marketplace-msptv" Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.822023 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88ff251b-c893-4ea0-baa4-7d69abc30ac1-utilities\") pod \"redhat-marketplace-msptv\" (UID: \"88ff251b-c893-4ea0-baa4-7d69abc30ac1\") " pod="openshift-marketplace/redhat-marketplace-msptv" Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.822115 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-utilities\") pod \"certified-operators-vs7bc\" (UID: \"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9\") " pod="openshift-marketplace/certified-operators-vs7bc" Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.822169 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-catalog-content\") pod \"certified-operators-vs7bc\" (UID: \"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9\") " pod="openshift-marketplace/certified-operators-vs7bc" Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.822222 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zx9lc\" (UniqueName: \"kubernetes.io/projected/88ff251b-c893-4ea0-baa4-7d69abc30ac1-kube-api-access-zx9lc\") pod \"redhat-marketplace-msptv\" (UID: \"88ff251b-c893-4ea0-baa4-7d69abc30ac1\") " pod="openshift-marketplace/redhat-marketplace-msptv" Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.822301 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xh2mm\" (UniqueName: \"kubernetes.io/projected/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-kube-api-access-xh2mm\") pod \"certified-operators-vs7bc\" (UID: \"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9\") " pod="openshift-marketplace/certified-operators-vs7bc" Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.902985 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88ff251b-c893-4ea0-baa4-7d69abc30ac1-utilities\") pod \"redhat-marketplace-msptv\" (UID: \"88ff251b-c893-4ea0-baa4-7d69abc30ac1\") " pod="openshift-marketplace/redhat-marketplace-msptv" Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.930713 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/88ff251b-c893-4ea0-baa4-7d69abc30ac1-catalog-content\") pod \"redhat-marketplace-msptv\" (UID: \"88ff251b-c893-4ea0-baa4-7d69abc30ac1\") " pod="openshift-marketplace/redhat-marketplace-msptv" Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.934651 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-utilities\") pod \"certified-operators-vs7bc\" (UID: \"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9\") " pod="openshift-marketplace/certified-operators-vs7bc" Jan 28 18:02:44 crc kubenswrapper[4877]: I0128 18:02:44.941144 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-catalog-content\") pod \"certified-operators-vs7bc\" (UID: \"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9\") " pod="openshift-marketplace/certified-operators-vs7bc" Jan 28 18:02:45 crc kubenswrapper[4877]: I0128 18:02:45.023565 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xh2mm\" (UniqueName: \"kubernetes.io/projected/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-kube-api-access-xh2mm\") pod \"certified-operators-vs7bc\" (UID: \"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9\") " pod="openshift-marketplace/certified-operators-vs7bc" Jan 28 18:02:45 crc kubenswrapper[4877]: I0128 18:02:45.044574 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zx9lc\" (UniqueName: \"kubernetes.io/projected/88ff251b-c893-4ea0-baa4-7d69abc30ac1-kube-api-access-zx9lc\") pod \"redhat-marketplace-msptv\" (UID: \"88ff251b-c893-4ea0-baa4-7d69abc30ac1\") " pod="openshift-marketplace/redhat-marketplace-msptv" Jan 28 18:02:45 crc kubenswrapper[4877]: I0128 18:02:45.184464 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-msptv" Jan 28 18:02:45 crc kubenswrapper[4877]: I0128 18:02:45.198630 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vs7bc" Jan 28 18:02:48 crc kubenswrapper[4877]: I0128 18:02:48.022639 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" podUID="2e00aa5f-0d94-48bb-9802-cfff5c46490f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:02:48 crc kubenswrapper[4877]: I0128 18:02:48.022693 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" podUID="2e00aa5f-0d94-48bb-9802-cfff5c46490f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:02:48 crc kubenswrapper[4877]: I0128 18:02:48.953337 4877 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.562488455s: [/var/lib/containers/storage/overlay/d1dcf7af71b708f56e2ced523f70533df0b7a2a180f1dc47e5732150113c9fde/diff /var/log/pods/openstack_openstackclient_e5410b8c-cff8-4df4-885f-e550cf3d6dfd/openstackclient/0.log]; will not log again for this container unless duration exceeds 2s Jan 28 18:02:53 crc kubenswrapper[4877]: I0128 18:02:53.028604 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=< Jan 28 18:02:53 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:02:53 crc kubenswrapper[4877]: > Jan 28 18:02:54 crc kubenswrapper[4877]: I0128 18:02:54.731076 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vs7bc"] Jan 28 18:02:54 crc kubenswrapper[4877]: I0128 18:02:54.744052 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-msptv"] Jan 28 18:02:54 crc kubenswrapper[4877]: W0128 18:02:54.963460 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod88ff251b_c893_4ea0_baa4_7d69abc30ac1.slice/crio-c185fc89021dfe66164c12807d1ae4a288e14c761b9379fdf3c0e2b51644c195 WatchSource:0}: Error finding container c185fc89021dfe66164c12807d1ae4a288e14c761b9379fdf3c0e2b51644c195: Status 404 returned error can't find the container with id c185fc89021dfe66164c12807d1ae4a288e14c761b9379fdf3c0e2b51644c195 Jan 28 18:02:54 crc kubenswrapper[4877]: W0128 18:02:54.969467 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbf9a10ec_fadb_42f5_80ea_7e5caa7a4dc9.slice/crio-4e0cf531afc333b0ec63694a8aa0b99002cddac6bb4f88ba41a7adab1561e233 WatchSource:0}: Error finding container 4e0cf531afc333b0ec63694a8aa0b99002cddac6bb4f88ba41a7adab1561e233: Status 404 returned error can't find the container with id 4e0cf531afc333b0ec63694a8aa0b99002cddac6bb4f88ba41a7adab1561e233 Jan 28 18:02:55 crc kubenswrapper[4877]: I0128 18:02:55.076277 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vs7bc" event={"ID":"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9","Type":"ContainerStarted","Data":"4e0cf531afc333b0ec63694a8aa0b99002cddac6bb4f88ba41a7adab1561e233"} Jan 28 18:02:55 crc 
Jan 28 18:02:55 crc kubenswrapper[4877]: I0128 18:02:55.077415 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msptv" event={"ID":"88ff251b-c893-4ea0-baa4-7d69abc30ac1","Type":"ContainerStarted","Data":"c185fc89021dfe66164c12807d1ae4a288e14c761b9379fdf3c0e2b51644c195"}
Jan 28 18:02:57 crc kubenswrapper[4877]: I0128 18:02:57.298385 4877 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.31671049s: [/var/lib/containers/storage/overlay/ce79aac9f3e9e0402833fda4e0858edd0e8a7d3fbf4ec6bd7e76b7ff76511b33/diff /var/log/pods/openstack_aodh-0_3e9dfe3e-469f-49a4-9956-85cc87e7a16a/aodh-api/0.log]; will not log again for this container unless duration exceeds 2s
Jan 28 18:02:58 crc kubenswrapper[4877]: I0128 18:02:58.114907 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vs7bc" event={"ID":"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9","Type":"ContainerDied","Data":"d4bbd41832676d25b154042830ac3fbe76b430f639bbc400f58031926108755c"}
Jan 28 18:02:58 crc kubenswrapper[4877]: I0128 18:02:58.195031 4877 generic.go:334] "Generic (PLEG): container finished" podID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" containerID="d4bbd41832676d25b154042830ac3fbe76b430f639bbc400f58031926108755c" exitCode=0
Jan 28 18:02:58 crc kubenswrapper[4877]: I0128 18:02:58.201168 4877 generic.go:334] "Generic (PLEG): container finished" podID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerID="537b0410574daa840ae1ba0f6ab3215df12c7ab196dfba12e5cc2893447f2ce8" exitCode=0
Jan 28 18:02:58 crc kubenswrapper[4877]: I0128 18:02:58.201207 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msptv" event={"ID":"88ff251b-c893-4ea0-baa4-7d69abc30ac1","Type":"ContainerDied","Data":"537b0410574daa840ae1ba0f6ab3215df12c7ab196dfba12e5cc2893447f2ce8"}
Jan 28 18:02:59 crc kubenswrapper[4877]: I0128 18:02:59.298559 4877 trace.go:236] Trace[1699561759]: "Calculate volume metrics of swift for pod openstack/swift-storage-0" (28-Jan-2026 18:02:57.950) (total time: 1316ms):
Jan 28 18:02:59 crc kubenswrapper[4877]: Trace[1699561759]: [1.316382511s] [1.316382511s] END
Jan 28 18:02:59 crc kubenswrapper[4877]: I0128 18:02:59.313750 4877 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 28 18:02:59 crc kubenswrapper[4877]: I0128 18:02:59.344282 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"
Jan 28 18:02:59 crc kubenswrapper[4877]: E0128 18:02:59.385710 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 18:03:02 crc kubenswrapper[4877]: I0128 18:03:02.957351 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:03:02 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:03:02 crc kubenswrapper[4877]: >
Jan 28 18:03:07 crc kubenswrapper[4877]: I0128 18:03:07.657825 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msptv" event={"ID":"88ff251b-c893-4ea0-baa4-7d69abc30ac1","Type":"ContainerStarted","Data":"6c643b54bc518d99796d626c7b93289eb691e36cfe57cd13691bf9ac08cebf8e"}
pod="openshift-marketplace/redhat-marketplace-msptv" event={"ID":"88ff251b-c893-4ea0-baa4-7d69abc30ac1","Type":"ContainerStarted","Data":"6c643b54bc518d99796d626c7b93289eb691e36cfe57cd13691bf9ac08cebf8e"} Jan 28 18:03:07 crc kubenswrapper[4877]: I0128 18:03:07.660887 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vs7bc" event={"ID":"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9","Type":"ContainerStarted","Data":"843742a09a04c38955284b002724373ecec2529696eb42253bf00f81d1a325ef"} Jan 28 18:03:11 crc kubenswrapper[4877]: I0128 18:03:11.341377 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a" Jan 28 18:03:11 crc kubenswrapper[4877]: E0128 18:03:11.346014 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:03:12 crc kubenswrapper[4877]: I0128 18:03:12.404454 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm" podUID="a18cdf94-0fd1-491c-8213-f2bd11b787e2" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.117:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:13 crc kubenswrapper[4877]: I0128 18:03:13.351884 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=< Jan 28 18:03:13 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:03:13 crc kubenswrapper[4877]: > Jan 28 18:03:14 crc kubenswrapper[4877]: I0128 18:03:14.586619 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-qn628" podUID="e25ef10a-92ae-45b2-9467-7f15b523a8a1" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:15 crc kubenswrapper[4877]: I0128 18:03:15.760113 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="5b2e9ef8-3e88-4cec-bda6-2c143670f73a" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Jan 28 18:03:15 crc kubenswrapper[4877]: I0128 18:03:15.760664 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="5b2e9ef8-3e88-4cec-bda6-2c143670f73a" containerName="ceilometer-notification-agent" probeResult="failure" output="command timed out" Jan 28 18:03:19 crc kubenswrapper[4877]: I0128 18:03:19.911362 4877 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wnxxp container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.36:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:19 crc kubenswrapper[4877]: I0128 18:03:19.911539 4877 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wnxxp container/packageserver namespace/openshift-operator-lifecycle-manager: 
Jan 28 18:03:19 crc kubenswrapper[4877]: I0128 18:03:19.929626 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp" podUID="fbd8dc94-00b1-4aff-a395-72702a0db6c1" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.36:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:19 crc kubenswrapper[4877]: I0128 18:03:19.929550 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp" podUID="fbd8dc94-00b1-4aff-a395-72702a0db6c1" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.36:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:21 crc kubenswrapper[4877]: I0128 18:03:21.827701 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-init-7997469d6c-45lz7" podUID="c469a239-6ccb-4dd5-8778-3921ec52b6fb" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.100:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:22 crc kubenswrapper[4877]: I0128 18:03:21.886782 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n" podUID="2106e351-4841-4ab5-84eb-745af2cb3379" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:22 crc kubenswrapper[4877]: I0128 18:03:22.555726 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm" podUID="f47bead5-fd76-4061-8ca4-51ed7bf2d97d" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.120:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:23 crc kubenswrapper[4877]: I0128 18:03:23.251328 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-marketplace-fp9w8" podUID="a782acd7-6ba8-4909-94e9-5005fd637272" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:03:23 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:03:23 crc kubenswrapper[4877]: >
Jan 28 18:03:23 crc kubenswrapper[4877]: I0128 18:03:23.253864 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-fp9w8" podUID="a782acd7-6ba8-4909-94e9-5005fd637272" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:03:23 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:03:23 crc kubenswrapper[4877]: >
Jan 28 18:03:23 crc kubenswrapper[4877]: I0128 18:03:23.253875 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:03:23 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:03:23 crc kubenswrapper[4877]: >
Jan 28 18:03:23 crc kubenswrapper[4877]: I0128 18:03:23.355610 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 18:03:23 crc kubenswrapper[4877]: I0128 18:03:23.428444 4877 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.265771982s: [/var/lib/containers/storage/overlay/1131464be5cab9e222fc9a2223326e7a244adc221cae0102cd5f5cc43a482ca8/diff /var/log/pods/openstack_placement-7b4cc7844-8vpdw_c679cf09-1426-4fc7-85c6-b6be6cfb6153/placement-api/0.log]; will not log again for this container unless duration exceeds 2s
Jan 28 18:03:23 crc kubenswrapper[4877]: I0128 18:03:23.460032 4877 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.406602329s: [/var/lib/containers/storage/overlay/ca1450e8eb87f22be77e5f6792a6a8e3e157fcc985623bc2a061e7ff5153df54/diff /var/log/pods/openshift-console_downloads-7954f5f757-vpzx9_754cf791-541c-4944-bf3e-7ba18f44d8de/download-server/0.log]; will not log again for this container unless duration exceeds 2s
Jan 28 18:03:24 crc kubenswrapper[4877]: I0128 18:03:24.429337 4877 patch_prober.go:28] interesting pod/controller-manager-678cc9d6c4-99z4w container/controller-manager namespace/openshift-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:24 crc kubenswrapper[4877]: I0128 18:03:24.429463 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" podUID="46eee1f6-965e-4d6d-a520-77b2f472b164" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:24 crc kubenswrapper[4877]: I0128 18:03:24.441333 4877 patch_prober.go:28] interesting pod/controller-manager-678cc9d6c4-99z4w container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:24 crc kubenswrapper[4877]: I0128 18:03:24.441546 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" podUID="46eee1f6-965e-4d6d-a520-77b2f472b164" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:24 crc kubenswrapper[4877]: I0128 18:03:24.586601 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-qn628" podUID="e25ef10a-92ae-45b2-9467-7f15b523a8a1" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:24 crc kubenswrapper[4877]: I0128 18:03:24.078265 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"1d6b80ce5f7fb85778eeff2a24ed478861089cbb66c8347538a0400dc8d2a031"} pod="openshift-marketplace/redhat-operators-9lmsk" containerMessage="Container registry-server failed startup probe, will be restarted"
Jan 28 18:03:24 crc kubenswrapper[4877]: I0128 18:03:24.617812 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="6cf870d4-330c-490b-8fcc-77028d084de4" containerName="galera" probeResult="failure" output="command timed out"
Jan 28 18:03:24 crc kubenswrapper[4877]: I0128 18:03:24.617868 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="6cf870d4-330c-490b-8fcc-77028d084de4" containerName="galera" probeResult="failure" output="command timed out"
Jan 28 18:03:24 crc kubenswrapper[4877]: I0128 18:03:24.632390 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" containerID="cri-o://1d6b80ce5f7fb85778eeff2a24ed478861089cbb66c8347538a0400dc8d2a031" gracePeriod=30
Jan 28 18:03:25 crc kubenswrapper[4877]: I0128 18:03:25.052950 4877 trace.go:236] Trace[1238015529]: "Calculate volume metrics of ca-trust-extracted for pod openshift-image-registry/image-registry-66df7c8f76-62l6g" (28-Jan-2026 18:03:23.141) (total time: 1891ms):
Jan 28 18:03:25 crc kubenswrapper[4877]: Trace[1238015529]: [1.89112319s] [1.89112319s] END
Jan 28 18:03:25 crc kubenswrapper[4877]: I0128 18:03:25.755021 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="5fbd26ca-eb13-4e63-b055-3ee514dbcea6" containerName="prometheus" probeResult="failure" output="command timed out"
Jan 28 18:03:25 crc kubenswrapper[4877]: I0128 18:03:25.755954 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-k8s-0" podUID="5fbd26ca-eb13-4e63-b055-3ee514dbcea6" containerName="prometheus" probeResult="failure" output="command timed out"
Jan 28 18:03:25 crc kubenswrapper[4877]: I0128 18:03:25.827710 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"
Jan 28 18:03:26 crc kubenswrapper[4877]: E0128 18:03:26.031602 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 18:03:26 crc kubenswrapper[4877]: I0128 18:03:26.755997 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c" containerName="galera" probeResult="failure" output="command timed out"
Jan 28 18:03:26 crc kubenswrapper[4877]: I0128 18:03:26.756468 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c" containerName="galera" probeResult="failure" output="command timed out"
Jan 28 18:03:27 crc kubenswrapper[4877]: I0128 18:03:27.065430 4877 patch_prober.go:28] interesting pod/metrics-server-fbbd74554-qkt8l container/metrics-server namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.75:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:27 crc kubenswrapper[4877]: I0128 18:03:27.065921 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" podUID="7829fe04-318e-4cda-adb5-4109e6d6f751" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.75:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" podUID="7829fe04-318e-4cda-adb5-4109e6d6f751" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.75:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:27 crc kubenswrapper[4877]: I0128 18:03:27.270741 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/infra-operator-controller-manager-694cf4f878-zsqn6" podUID="6fdf0399-314b-40df-96f2-c27008769f71" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.107:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:27 crc kubenswrapper[4877]: I0128 18:03:27.354871 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="327e7593-5623-475c-ad8f-2456a437a645" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.219:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:27 crc kubenswrapper[4877]: I0128 18:03:27.458192 4877 patch_prober.go:28] interesting pod/monitoring-plugin-595f97fc4c-4kdp8 container/monitoring-plugin namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.76:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:27 crc kubenswrapper[4877]: I0128 18:03:27.458275 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/monitoring-plugin-595f97fc4c-4kdp8" podUID="e831d929-583c-4da5-8ab2-27d484da84b2" containerName="monitoring-plugin" probeResult="failure" output="Get \"https://10.217.0.76:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:27 crc kubenswrapper[4877]: I0128 18:03:27.759602 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-index-sgvqs" podUID="2921bb76-4308-4082-ab32-4dc817ccac74" containerName="registry-server" probeResult="failure" output="command timed out" Jan 28 18:03:28 crc kubenswrapper[4877]: I0128 18:03:27.759819 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-index-sgvqs" podUID="2921bb76-4308-4082-ab32-4dc817ccac74" containerName="registry-server" probeResult="failure" output="command timed out" Jan 28 18:03:28 crc kubenswrapper[4877]: I0128 18:03:28.189964 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" podUID="2e00aa5f-0d94-48bb-9802-cfff5c46490f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:28 crc kubenswrapper[4877]: I0128 18:03:28.190309 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" podUID="2e00aa5f-0d94-48bb-9802-cfff5c46490f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:28 crc kubenswrapper[4877]: I0128 18:03:28.948559 4877 patch_prober.go:28] interesting pod/loki-operator-controller-manager-dd586d7ff-8kr7k 
container/manager namespace/openshift-operators-redhat: Readiness probe status=failure output="Get \"http://10.217.0.48:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:28 crc kubenswrapper[4877]: I0128 18:03:28.948970 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k" podUID="930eb8ce-5fe9-4e7d-a700-d52614d5915a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.48:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:28 crc kubenswrapper[4877]: I0128 18:03:28.949045 4877 patch_prober.go:28] interesting pod/loki-operator-controller-manager-dd586d7ff-8kr7k container/manager namespace/openshift-operators-redhat: Liveness probe status=failure output="Get \"http://10.217.0.48:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:28 crc kubenswrapper[4877]: I0128 18:03:28.949064 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k" podUID="930eb8ce-5fe9-4e7d-a700-d52614d5915a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.48:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:29 crc kubenswrapper[4877]: I0128 18:03:29.099624 4877 patch_prober.go:28] interesting pod/console-6c679b788d-zft7r container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.138:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:29 crc kubenswrapper[4877]: I0128 18:03:29.099692 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-6c679b788d-zft7r" podUID="1a65e44f-17f2-42b4-80f9-5f4f06368d7d" containerName="console" probeResult="failure" output="Get \"https://10.217.0.138:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:29 crc kubenswrapper[4877]: I0128 18:03:29.558332 4877 trace.go:236] Trace[123917423]: "Calculate volume metrics of catalog-content for pod openshift-marketplace/certified-operators-8q65r" (28-Jan-2026 18:03:20.328) (total time: 9229ms): Jan 28 18:03:29 crc kubenswrapper[4877]: Trace[123917423]: [9.229393814s] [9.229393814s] END Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.050991 4877 trace.go:236] Trace[1319213203]: "Calculate volume metrics of registry-storage for pod openshift-image-registry/image-registry-66df7c8f76-62l6g" (28-Jan-2026 18:03:25.057) (total time: 4993ms): Jan 28 18:03:30 crc kubenswrapper[4877]: Trace[1319213203]: [4.993702585s] [4.993702585s] END Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.050996 4877 trace.go:236] Trace[799655913]: "Calculate volume metrics of persistence for pod openstack/rabbitmq-server-2" (28-Jan-2026 18:03:28.958) (total time: 1092ms): Jan 28 18:03:30 crc kubenswrapper[4877]: Trace[799655913]: [1.092302533s] [1.092302533s] END Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.135168 4877 patch_prober.go:28] interesting pod/image-registry-66df7c8f76-62l6g container/registry namespace/openshift-image-registry: Liveness probe status=failure output="Get \"https://10.217.0.60:5000/healthz\": net/http: request canceled 
(Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.135260 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-image-registry/image-registry-66df7c8f76-62l6g" podUID="652e294d-efe3-4f93-828f-c6cacf3d7166" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.60:5000/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.489716 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Liveness probe status=failure output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.489783 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.489861 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.489791 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.614871 4877 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-n9qwr container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.614953 4877 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-rlzcf container/package-server-manager namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.615410 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf" podUID="a7623e5a-223d-4da1-94fe-d671bfc4cb3d" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.25:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.614991 4877 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-n9qwr container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout 
exceeded while awaiting headers)" start-of-body= Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.615500 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" podUID="362f8c1a-4938-4ee4-853b-8f868147d732" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.615013 4877 patch_prober.go:28] interesting pod/package-server-manager-789f6589d5-rlzcf container/package-server-manager namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"http://10.217.0.25:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.615536 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rlzcf" podUID="a7623e5a-223d-4da1-94fe-d671bfc4cb3d" containerName="package-server-manager" probeResult="failure" output="Get \"http://10.217.0.25:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.615357 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" podUID="362f8c1a-4938-4ee4-853b-8f868147d732" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.617785 4877 trace.go:236] Trace[398414772]: "Calculate volume metrics of catalog-content for pod openshift-marketplace/redhat-operators-psvnx" (28-Jan-2026 18:03:26.525) (total time: 4092ms): Jan 28 18:03:30 crc kubenswrapper[4877]: Trace[398414772]: [4.092634883s] [4.092634883s] END Jan 28 18:03:30 crc kubenswrapper[4877]: I0128 18:03:30.756814 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="5b2e9ef8-3e88-4cec-bda6-2c143670f73a" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Jan 28 18:03:31 crc kubenswrapper[4877]: I0128 18:03:31.204773 4877 generic.go:334] "Generic (PLEG): container finished" podID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerID="6c643b54bc518d99796d626c7b93289eb691e36cfe57cd13691bf9ac08cebf8e" exitCode=0 Jan 28 18:03:31 crc kubenswrapper[4877]: I0128 18:03:31.204839 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msptv" event={"ID":"88ff251b-c893-4ea0-baa4-7d69abc30ac1","Type":"ContainerDied","Data":"6c643b54bc518d99796d626c7b93289eb691e36cfe57cd13691bf9ac08cebf8e"} Jan 28 18:03:31 crc kubenswrapper[4877]: I0128 18:03:31.411302 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb" podUID="e068371c-e59c-4e57-8fd3-a55470f67063" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.101:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:31 crc kubenswrapper[4877]: I0128 18:03:31.411306 4877 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb" podUID="e068371c-e59c-4e57-8fd3-a55470f67063" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.101:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:31 crc kubenswrapper[4877]: I0128 18:03:31.632729 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-pvxjd" podUID="c2d80875-c32f-4596-a44d-6a4b9d524304" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:31 crc kubenswrapper[4877]: I0128 18:03:31.633093 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j" podUID="55abdd00-6b2b-44a2-ae22-bae3fbb12282" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.105:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:31 crc kubenswrapper[4877]: I0128 18:03:31.634380 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-hsh6j" podUID="55abdd00-6b2b-44a2-ae22-bae3fbb12282" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.105:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:31 crc kubenswrapper[4877]: I0128 18:03:31.634416 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-pvxjd" podUID="c2d80875-c32f-4596-a44d-6a4b9d524304" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:31 crc kubenswrapper[4877]: I0128 18:03:31.832648 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2" podUID="434f69b5-0d70-418d-aa5e-04e307a5399c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:31 crc kubenswrapper[4877]: I0128 18:03:31.832676 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2" podUID="434f69b5-0d70-418d-aa5e-04e307a5399c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:31 crc kubenswrapper[4877]: I0128 18:03:31.928651 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n" podUID="2106e351-4841-4ab5-84eb-745af2cb3379" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:31 crc kubenswrapper[4877]: I0128 18:03:31.928721 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-bbb7n" podUID="2106e351-4841-4ab5-84eb-745af2cb3379" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.109:8081/readyz\": context deadline 
exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.093702 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m" podUID="aa23290f-1702-4c63-92c7-047d18922df9" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.093697 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m" podUID="aa23290f-1702-4c63-92c7-047d18922df9" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.176729 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb" podUID="4533e492-2631-4c22-af2f-6bec08b23280" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.113:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.214010 4877 patch_prober.go:28] interesting pod/logging-loki-gateway-5f6787f74d-4m9v2 container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.54:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.214052 4877 patch_prober.go:28] interesting pod/logging-loki-gateway-5f6787f74d-4m9v2 container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.54:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.214126 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" podUID="f014bcf5-ec99-4a23-a06c-29c2e8213375" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.54:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.214108 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" podUID="f014bcf5-ec99-4a23-a06c-29c2e8213375" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.54:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.259663 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k" podUID="337c06ec-4c42-41a1-8faa-60338d4eeddc" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.111:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.259703 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-cl46k" podUID="1b84deae-93ba-48f2-88b2-583025b41dc0" containerName="manager" probeResult="failure" output="Get 
\"http://10.217.0.112:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.322190 4877 patch_prober.go:28] interesting pod/logging-loki-gateway-5f6787f74d-h6fl4 container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.322257 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" podUID="d4a36cc7-b86f-45f7-a422-db7241ba6513" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.322269 4877 patch_prober.go:28] interesting pod/logging-loki-gateway-5f6787f74d-h6fl4 container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.322336 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" podUID="d4a36cc7-b86f-45f7-a422-db7241ba6513" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.343089 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg" podUID="cc059705-cab0-43ef-b078-34509b901591" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.114:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.343132 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k" podUID="337c06ec-4c42-41a1-8faa-60338d4eeddc" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.111:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.344919 4877 patch_prober.go:28] interesting pod/perses-operator-5bf474d74f-mzftl container/perses-operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.16:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.344984 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" podUID="d18fe762-0cf5-444e-a4c0-28e812f435fa" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.16:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.345032 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb" podUID="4533e492-2631-4c22-af2f-6bec08b23280" containerName="manager" probeResult="failure" output="Get 
\"http://10.217.0.113:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.345067 4877 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-b824q container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.68:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.345095 4877 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-b824q container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.68:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.345157 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q" podUID="44b0ea14-238d-4f58-b504-b6375aa5137b" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.68:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.345305 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q" podUID="44b0ea14-238d-4f58-b504-b6375aa5137b" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.68:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.425183 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/neutron-operator-controller-manager-78d58447c5-cl46k" podUID="1b84deae-93ba-48f2-88b2-583025b41dc0" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.112:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.425366 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/octavia-operator-controller-manager-5f4cd88d46-bdsdg" podUID="cc059705-cab0-43ef-b078-34509b901591" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.114:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.506682 4877 patch_prober.go:28] interesting pod/perses-operator-5bf474d74f-mzftl container/perses-operator namespace/openshift-operators: Liveness probe status=failure output="Get \"http://10.217.0.16:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.506688 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-vcxlv" podUID="dc576625-1984-4bcf-9c11-8dfbe037d0a1" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting 
headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.506742 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" podUID="d18fe762-0cf5-444e-a4c0-28e812f435fa" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.16:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.507124 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-vcxlv" podUID="dc576625-1984-4bcf-9c11-8dfbe037d0a1" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.507169 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm" podUID="a18cdf94-0fd1-491c-8213-f2bd11b787e2" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.117:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.599680 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm" podUID="f47bead5-fd76-4061-8ca4-51ed7bf2d97d" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.120:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.682700 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-2zpzm" podUID="f47bead5-fd76-4061-8ca4-51ed7bf2d97d" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.120:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.682777 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/watcher-operator-controller-manager-564965969-5ndmd" podUID="c6112d71-edde-4615-9f4d-1c59cf38702d" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.121:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:32 crc kubenswrapper[4877]: I0128 18:03:32.682969 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/watcher-operator-controller-manager-564965969-5ndmd" podUID="c6112d71-edde-4615-9f4d-1c59cf38702d" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.121:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:33 crc kubenswrapper[4877]: I0128 18:03:33.252105 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-fp9w8" podUID="a782acd7-6ba8-4909-94e9-5005fd637272" containerName="registry-server" probeResult="failure" output=< Jan 28 18:03:33 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:03:33 crc kubenswrapper[4877]: > Jan 28 18:03:33 crc kubenswrapper[4877]: I0128 18:03:33.423924 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-marketplace-fp9w8" podUID="a782acd7-6ba8-4909-94e9-5005fd637272" 
containerName="registry-server" probeResult="failure" output=< Jan 28 18:03:33 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:03:33 crc kubenswrapper[4877]: > Jan 28 18:03:33 crc kubenswrapper[4877]: I0128 18:03:33.672641 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" podUID="59f588be-7008-4941-a210-ba17edc1ff30" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.94:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:33 crc kubenswrapper[4877]: I0128 18:03:33.672771 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/metallb-operator-webhook-server-9b8bf7874-4g47r" podUID="59f588be-7008-4941-a210-ba17edc1ff30" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.94:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:33 crc kubenswrapper[4877]: I0128 18:03:33.756156 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="6cf870d4-330c-490b-8fcc-77028d084de4" containerName="galera" probeResult="failure" output="command timed out" Jan 28 18:03:33 crc kubenswrapper[4877]: I0128 18:03:33.756709 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="6cf870d4-330c-490b-8fcc-77028d084de4" containerName="galera" probeResult="failure" output="command timed out" Jan 28 18:03:34 crc kubenswrapper[4877]: I0128 18:03:34.185653 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" podUID="8df1e028-d1c7-4b68-b63a-8cc8e762b59d" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.122:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:34 crc kubenswrapper[4877]: I0128 18:03:34.506659 4877 patch_prober.go:28] interesting pod/controller-manager-678cc9d6c4-99z4w container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:34 crc kubenswrapper[4877]: I0128 18:03:34.506698 4877 patch_prober.go:28] interesting pod/route-controller-manager-77c8bf88f9-l4t7r container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.58:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:34 crc kubenswrapper[4877]: I0128 18:03:34.506728 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" podUID="59c5ea7f-f3a1-4fa3-882c-5690f3af3026" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.58:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:34 crc kubenswrapper[4877]: I0128 18:03:34.506781 4877 patch_prober.go:28] interesting pod/controller-manager-678cc9d6c4-99z4w container/controller-manager namespace/openshift-controller-manager: Liveness probe status=failure output="Get 
\"https://10.217.0.61:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:34 crc kubenswrapper[4877]: I0128 18:03:34.506658 4877 patch_prober.go:28] interesting pod/route-controller-manager-77c8bf88f9-l4t7r container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.58:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:34 crc kubenswrapper[4877]: I0128 18:03:34.506798 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" podUID="46eee1f6-965e-4d6d-a520-77b2f472b164" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:34 crc kubenswrapper[4877]: I0128 18:03:34.506877 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" podUID="59c5ea7f-f3a1-4fa3-882c-5690f3af3026" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.58:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:34 crc kubenswrapper[4877]: I0128 18:03:34.506725 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" podUID="46eee1f6-965e-4d6d-a520-77b2f472b164" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:34 crc kubenswrapper[4877]: I0128 18:03:34.668687 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-qn628" podUID="e25ef10a-92ae-45b2-9467-7f15b523a8a1" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:34 crc kubenswrapper[4877]: I0128 18:03:34.668725 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-qn628" podUID="e25ef10a-92ae-45b2-9467-7f15b523a8a1" containerName="controller" probeResult="failure" output="Get \"http://127.0.0.1:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:34 crc kubenswrapper[4877]: I0128 18:03:34.668687 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-qn628" podUID="e25ef10a-92ae-45b2-9467-7f15b523a8a1" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:34 crc kubenswrapper[4877]: I0128 18:03:34.668795 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="metallb-system/frr-k8s-qn628" Jan 28 18:03:34 crc kubenswrapper[4877]: I0128 18:03:34.671611 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="frr" containerStatusID={"Type":"cri-o","ID":"615e8f68029597b333017bdeec7de9fac311eb0fe14559de00603402f219f7e3"} pod="metallb-system/frr-k8s-qn628" containerMessage="Container frr failed liveness probe, will be restarted" Jan 28 18:03:34 crc kubenswrapper[4877]: 
I0128 18:03:34.671778 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/frr-k8s-qn628" podUID="e25ef10a-92ae-45b2-9467-7f15b523a8a1" containerName="frr" containerID="cri-o://615e8f68029597b333017bdeec7de9fac311eb0fe14559de00603402f219f7e3" gracePeriod=2 Jan 28 18:03:35 crc kubenswrapper[4877]: I0128 18:03:34.757030 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c" containerName="galera" probeResult="failure" output="command timed out" Jan 28 18:03:35 crc kubenswrapper[4877]: I0128 18:03:34.757463 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c" containerName="galera" probeResult="failure" output="command timed out" Jan 28 18:03:35 crc kubenswrapper[4877]: I0128 18:03:34.857309 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-index-sgvqs" podUID="2921bb76-4308-4082-ab32-4dc817ccac74" containerName="registry-server" probeResult="failure" output=< Jan 28 18:03:35 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:03:35 crc kubenswrapper[4877]: > Jan 28 18:03:35 crc kubenswrapper[4877]: I0128 18:03:34.859004 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-index-sgvqs" podUID="2921bb76-4308-4082-ab32-4dc817ccac74" containerName="registry-server" probeResult="failure" output=< Jan 28 18:03:35 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:03:35 crc kubenswrapper[4877]: > Jan 28 18:03:35 crc kubenswrapper[4877]: I0128 18:03:34.945695 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl" podUID="04c21939-4136-40d4-9569-ea4f0bc523c4" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:35 crc kubenswrapper[4877]: I0128 18:03:34.945768 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-8znpl" podUID="04c21939-4136-40d4-9569-ea4f0bc523c4" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.95:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:35 crc kubenswrapper[4877]: I0128 18:03:34.996858 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="cert-manager/cert-manager-webhook-687f57d79b-42v7p" podUID="c963fe29-366b-4362-9a13-89423728237d" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.44:6080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:35 crc kubenswrapper[4877]: I0128 18:03:35.394269 4877 generic.go:334] "Generic (PLEG): container finished" podID="e25ef10a-92ae-45b2-9467-7f15b523a8a1" containerID="615e8f68029597b333017bdeec7de9fac311eb0fe14559de00603402f219f7e3" exitCode=143 Jan 28 18:03:35 crc kubenswrapper[4877]: I0128 18:03:35.428162 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qn628" event={"ID":"e25ef10a-92ae-45b2-9467-7f15b523a8a1","Type":"ContainerDied","Data":"615e8f68029597b333017bdeec7de9fac311eb0fe14559de00603402f219f7e3"} Jan 28 18:03:35 crc kubenswrapper[4877]: I0128 
18:03:35.764963 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="5b2e9ef8-3e88-4cec-bda6-2c143670f73a" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Jan 28 18:03:35 crc kubenswrapper[4877]: I0128 18:03:35.901629 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/speaker-s7mdg" podUID="1f8e6a17-6325-49ef-88be-7be71a431bd9" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:35 crc kubenswrapper[4877]: I0128 18:03:35.901637 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-s7mdg" podUID="1f8e6a17-6325-49ef-88be-7be71a431bd9" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:35 crc kubenswrapper[4877]: I0128 18:03:35.993737 4877 patch_prober.go:28] interesting pod/logging-loki-distributor-5f678c8dd6-pl69c container/loki-distributor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.51:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:35 crc kubenswrapper[4877]: I0128 18:03:35.993812 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c" podUID="8edb732d-043a-4fbf-b0b1-da98fdaa9a84" containerName="loki-distributor" probeResult="failure" output="Get \"https://10.217.0.51:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:36 crc kubenswrapper[4877]: I0128 18:03:36.401412 4877 patch_prober.go:28] interesting pod/logging-loki-query-frontend-69d9546745-wkbs4 container/loki-query-frontend namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.53:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:36 crc kubenswrapper[4877]: I0128 18:03:36.401775 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4" podUID="ea81a55a-52f7-471b-bff8-9b49e05d459a" containerName="loki-query-frontend" probeResult="failure" output="Get \"https://10.217.0.53:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:36 crc kubenswrapper[4877]: I0128 18:03:36.503449 4877 patch_prober.go:28] interesting pod/logging-loki-querier-76788598db-8zdq7 container/loki-querier namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:36 crc kubenswrapper[4877]: I0128 18:03:36.503571 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-querier-76788598db-8zdq7" podUID="094adba0-094e-453d-87f4-b2098f9fe680" containerName="loki-querier" probeResult="failure" output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 
28 18:03:36 crc kubenswrapper[4877]: I0128 18:03:36.534751 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msptv" event={"ID":"88ff251b-c893-4ea0-baa4-7d69abc30ac1","Type":"ContainerStarted","Data":"a00223510ff917981513137208e2d8c83bc1cdacee9a730a724ff2185d58f5df"} Jan 28 18:03:36 crc kubenswrapper[4877]: I0128 18:03:36.960775 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-msptv" podStartSLOduration=18.858213413 podStartE2EDuration="53.937098743s" podCreationTimestamp="2026-01-28 18:02:43 +0000 UTC" firstStartedPulling="2026-01-28 18:02:59.31122044 +0000 UTC m=+5282.869547318" lastFinishedPulling="2026-01-28 18:03:34.39010576 +0000 UTC m=+5317.948432648" observedRunningTime="2026-01-28 18:03:36.934318028 +0000 UTC m=+5320.492644916" watchObservedRunningTime="2026-01-28 18:03:36.937098743 +0000 UTC m=+5320.495425641" Jan 28 18:03:37 crc kubenswrapper[4877]: I0128 18:03:37.064847 4877 patch_prober.go:28] interesting pod/metrics-server-fbbd74554-qkt8l container/metrics-server namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.75:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:37 crc kubenswrapper[4877]: I0128 18:03:37.064912 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" podUID="7829fe04-318e-4cda-adb5-4109e6d6f751" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.75:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:37 crc kubenswrapper[4877]: I0128 18:03:37.064847 4877 patch_prober.go:28] interesting pod/metrics-server-fbbd74554-qkt8l container/metrics-server namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.75:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:37 crc kubenswrapper[4877]: I0128 18:03:37.065057 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" podUID="7829fe04-318e-4cda-adb5-4109e6d6f751" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.75:10250/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:37 crc kubenswrapper[4877]: I0128 18:03:37.213965 4877 patch_prober.go:28] interesting pod/logging-loki-gateway-5f6787f74d-4m9v2 container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.54:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:37 crc kubenswrapper[4877]: I0128 18:03:37.214052 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" podUID="f014bcf5-ec99-4a23-a06c-29c2e8213375" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.54:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:38 crc kubenswrapper[4877]: I0128 18:03:37.322824 4877 patch_prober.go:28] interesting pod/logging-loki-gateway-5f6787f74d-h6fl4 container/opa namespace/openshift-logging: Readiness probe 
status=failure output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:38 crc kubenswrapper[4877]: I0128 18:03:37.322884 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" podUID="d4a36cc7-b86f-45f7-a422-db7241ba6513" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:38 crc kubenswrapper[4877]: I0128 18:03:37.777068 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qn628" event={"ID":"e25ef10a-92ae-45b2-9467-7f15b523a8a1","Type":"ContainerStarted","Data":"360170daf5dbd521bdeea860c552bfa986c2384bf1d4749fd148d90f90ab1985"} Jan 28 18:03:38 crc kubenswrapper[4877]: I0128 18:03:37.980649 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" podUID="2e00aa5f-0d94-48bb-9802-cfff5c46490f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:38 crc kubenswrapper[4877]: I0128 18:03:38.284291 4877 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Liveness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:38 crc kubenswrapper[4877]: I0128 18:03:38.284347 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:38 crc kubenswrapper[4877]: I0128 18:03:38.438732 4877 patch_prober.go:28] interesting pod/loki-operator-controller-manager-dd586d7ff-8kr7k container/manager namespace/openshift-operators-redhat: Readiness probe status=failure output="Get \"http://10.217.0.48:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:38 crc kubenswrapper[4877]: I0128 18:03:38.438789 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators-redhat/loki-operator-controller-manager-dd586d7ff-8kr7k" podUID="930eb8ce-5fe9-4e7d-a700-d52614d5915a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.48:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:38 crc kubenswrapper[4877]: I0128 18:03:38.553814 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-qn628" Jan 28 18:03:38 crc kubenswrapper[4877]: I0128 18:03:38.759192 4877 patch_prober.go:28] interesting pod/thanos-querier-d6bfd8f44-zx8vx container/kube-rbac-proxy-web namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.73:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:38 crc kubenswrapper[4877]: I0128 18:03:38.759260 4877 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx" podUID="88f59584-7374-4487-a7ed-970ea8a838c0" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.73:9091/-/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:38 crc kubenswrapper[4877]: I0128 18:03:38.932633 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-qn628" Jan 28 18:03:38 crc kubenswrapper[4877]: I0128 18:03:38.999749 4877 patch_prober.go:28] interesting pod/console-6c679b788d-zft7r container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.138:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:39 crc kubenswrapper[4877]: I0128 18:03:39.000150 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-6c679b788d-zft7r" podUID="1a65e44f-17f2-42b4-80f9-5f4f06368d7d" containerName="console" probeResult="failure" output="Get \"https://10.217.0.138:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:39 crc kubenswrapper[4877]: I0128 18:03:39.415464 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a" Jan 28 18:03:39 crc kubenswrapper[4877]: I0128 18:03:39.445854 4877 patch_prober.go:28] interesting pod/authentication-operator-69f744f599-q9rx2 container/authentication-operator namespace/openshift-authentication-operator: Liveness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:39 crc kubenswrapper[4877]: I0128 18:03:39.446261 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2" podUID="92f14a91-e41e-4b81-bec5-ea6cf4a3037a" containerName="authentication-operator" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:39 crc kubenswrapper[4877]: I0128 18:03:39.910031 4877 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Readiness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:39 crc kubenswrapper[4877]: I0128 18:03:39.910130 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 18:03:39 crc kubenswrapper[4877]: I0128 18:03:39.911229 4877 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wnxxp container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.36:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:03:39 crc 
kubenswrapper[4877]: I0128 18:03:39.911262 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp" podUID="fbd8dc94-00b1-4aff-a395-72702a0db6c1" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.36:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:39 crc kubenswrapper[4877]: I0128 18:03:39.911268 4877 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wnxxp container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:39 crc kubenswrapper[4877]: I0128 18:03:39.911320 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp" podUID="fbd8dc94-00b1-4aff-a395-72702a0db6c1" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.36:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:39 crc kubenswrapper[4877]: I0128 18:03:39.965571 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="06cc206a-5856-43ec-a04b-b2d51224314d" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.175:9090/-/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:39 crc kubenswrapper[4877]: I0128 18:03:39.965974 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/prometheus-metric-storage-0" podUID="06cc206a-5856-43ec-a04b-b2d51224314d" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.175:9090/-/healthy\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:40 crc kubenswrapper[4877]: I0128 18:03:40.538213 4877 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-6q4hg container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:40 crc kubenswrapper[4877]: I0128 18:03:40.538734 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" podUID="a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:40 crc kubenswrapper[4877]: I0128 18:03:40.539149 4877 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-6q4hg container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:40 crc kubenswrapper[4877]: I0128 18:03:40.539169 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-6q4hg" podUID="a6ee20f2-cf0c-470f-a536-9b6f0dfaee7f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.34:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:40 crc kubenswrapper[4877]: I0128 18:03:40.564223 4877 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-n9qwr container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:40 crc kubenswrapper[4877]: I0128 18:03:40.564325 4877 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-n9qwr container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:40 crc kubenswrapper[4877]: I0128 18:03:40.564525 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" podUID="362f8c1a-4938-4ee4-853b-8f868147d732" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:40 crc kubenswrapper[4877]: I0128 18:03:40.564365 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" podUID="362f8c1a-4938-4ee4-853b-8f868147d732" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:40 crc kubenswrapper[4877]: I0128 18:03:40.755687 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-k8s-0" podUID="5fbd26ca-eb13-4e63-b055-3ee514dbcea6" containerName="prometheus" probeResult="failure" output="command timed out"
Jan 28 18:03:40 crc kubenswrapper[4877]: I0128 18:03:40.755963 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="5fbd26ca-eb13-4e63-b055-3ee514dbcea6" containerName="prometheus" probeResult="failure" output="command timed out"
Jan 28 18:03:41 crc kubenswrapper[4877]: I0128 18:03:41.369687 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-7478f7dbf9-qrkvb" podUID="e068371c-e59c-4e57-8fd3-a55470f67063" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.101:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:41 crc kubenswrapper[4877]: I0128 18:03:41.411367 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-7f86f8796f-4xxbg" podUID="f24bb145-227c-43be-b63a-d606c168241b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:41 crc kubenswrapper[4877]: I0128 18:03:41.451756 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-vsnlm" podUID="970f0fe7-15e4-4fcf-bca0-eb07b26ba94a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.103:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:41 crc kubenswrapper[4877]: I0128 18:03:41.522716 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-pvxjd" podUID="c2d80875-c32f-4596-a44d-6a4b9d524304" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.106:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:41 crc kubenswrapper[4877]: I0128 18:03:41.793689 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-598f7747c9-z46t2" podUID="434f69b5-0d70-418d-aa5e-04e307a5399c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:41 crc kubenswrapper[4877]: I0128 18:03:41.873761 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"3b1158d685496e843371473bdeec73e64df842a9d2ab4be2675e257520e33a18"}
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.035130 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/certified-operators-8q65r" podUID="d0599e47-e131-43e4-a9f4-f362b888c964" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:03:42 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:03:42 crc kubenswrapper[4877]: >
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.035381 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/certified-operators-8q65r" podUID="d0599e47-e131-43e4-a9f4-f362b888c964" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:03:42 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:03:42 crc kubenswrapper[4877]: >
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.053704 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-88b5k" podUID="337c06ec-4c42-41a1-8faa-60338d4eeddc" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.111:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.053787 4877 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-2cprx container/operator namespace/openshift-operators: Liveness probe status=failure output="Get \"http://10.217.0.8:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.053851 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" podUID="38cddd18-356a-4be1-8e45-b908361805bf" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.8:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.054036 4877 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-2cprx container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.8:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.054092 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-59bdc8b94-2cprx" podUID="38cddd18-356a-4be1-8e45-b908361805bf" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.8:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.094800 4877 patch_prober.go:28] interesting pod/oauth-openshift-7448d7568b-r4ttp container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.56:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.094831 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/mariadb-operator-controller-manager-6b9fb5fdcb-pm47m" podUID="aa23290f-1702-4c63-92c7-047d18922df9" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.094978 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" podUID="eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.56:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.094805 4877 patch_prober.go:28] interesting pod/oauth-openshift-7448d7568b-r4ttp container/oauth-openshift namespace/openshift-authentication: Liveness probe status=failure output="Get \"https://10.217.0.56:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.095040 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" podUID="eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.56:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.094858 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/nova-operator-controller-manager-7bdb645866-qpltb" podUID="4533e492-2631-4c22-af2f-6bec08b23280" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.113:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.137753 4877 patch_prober.go:28] interesting pod/perses-operator-5bf474d74f-mzftl container/perses-operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.16:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.137820 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/perses-operator-5bf474d74f-mzftl" podUID="d18fe762-0cf5-444e-a4c0-28e812f435fa" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.16:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.156331 4877 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-b824q container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.68:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.156349 4877 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-b824q container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.68:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.156506 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q" podUID="44b0ea14-238d-4f58-b504-b6375aa5137b" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.68:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.156412 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-b824q" podUID="44b0ea14-238d-4f58-b504-b6375aa5137b" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.68:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.214279 4877 patch_prober.go:28] interesting pod/logging-loki-gateway-5f6787f74d-4m9v2 container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.54:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.214356 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-5f6787f74d-4m9v2" podUID="f014bcf5-ec99-4a23-a06c-29c2e8213375" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.54:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.216798 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-p88fg" podUID="ddffc31e-38aa-45c6-bad8-5787adf8c7fe" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.118:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.322559 4877 patch_prober.go:28] interesting pod/logging-loki-gateway-5f6787f74d-h6fl4 container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.322657 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-5f6787f74d-h6fl4" podUID="d4a36cc7-b86f-45f7-a422-db7241ba6513" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.55:8081/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.358834 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-6f75f45d54-vcxlv" podUID="dc576625-1984-4bcf-9c11-8dfbe037d0a1" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.115:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.406733 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/placement-operator-controller-manager-79d5ccc684-kljnm" podUID="a18cdf94-0fd1-491c-8213-f2bd11b787e2" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.117:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.542963 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-6469dc96d7-np95m" podUID="f083a8cb-517a-40eb-8cb7-6cf7ce24fe7c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.119:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:42 crc kubenswrapper[4877]: I0128 18:03:42.584719 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/watcher-operator-controller-manager-564965969-5ndmd" podUID="c6112d71-edde-4615-9f4d-1c59cf38702d" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.121:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:43 crc kubenswrapper[4877]: I0128 18:03:43.041874 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-tjn2b" podUID="048e6bc5-dad4-423f-a249-7c4addf02947" containerName="hostpath-provisioner" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 18:03:43 crc kubenswrapper[4877]: I0128 18:03:43.689609 4877 patch_prober.go:28] interesting pod/thanos-querier-d6bfd8f44-zx8vx container/kube-rbac-proxy-web namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.73:9091/-/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:43 crc kubenswrapper[4877]: I0128 18:03:43.689673 4877 patch_prober.go:28] interesting pod/thanos-querier-d6bfd8f44-zx8vx container/kube-rbac-proxy-web namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.73:9091/-/healthy\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:43 crc kubenswrapper[4877]: I0128 18:03:43.690115 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx" podUID="88f59584-7374-4487-a7ed-970ea8a838c0" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.73:9091/-/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:43 crc kubenswrapper[4877]: I0128 18:03:43.690199 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/thanos-querier-d6bfd8f44-zx8vx" podUID="88f59584-7374-4487-a7ed-970ea8a838c0" containerName="kube-rbac-proxy-web" probeResult="failure" output="Get \"https://10.217.0.73:9091/-/healthy\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:43 crc kubenswrapper[4877]: I0128 18:03:43.756017 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="6cf870d4-330c-490b-8fcc-77028d084de4" containerName="galera" probeResult="failure" output="command timed out"
Jan 28 18:03:43 crc kubenswrapper[4877]: I0128 18:03:43.756142 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Jan 28 18:03:43 crc kubenswrapper[4877]: I0128 18:03:43.756929 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="6cf870d4-330c-490b-8fcc-77028d084de4" containerName="galera" probeResult="failure" output="command timed out"
Jan 28 18:03:43 crc kubenswrapper[4877]: I0128 18:03:43.756968 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/openstack-galera-0"
Jan 28 18:03:43 crc kubenswrapper[4877]: I0128 18:03:43.766213 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="galera" containerStatusID={"Type":"cri-o","ID":"cc3e75950cda499aa563bbf84f763f05b9b143b4b68aa175824f43392e681d61"} pod="openstack/openstack-galera-0" containerMessage="Container galera failed liveness probe, will be restarted"
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.229796 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" podUID="8df1e028-d1c7-4b68-b63a-8cc8e762b59d" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.122:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.229809 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-659b944486-tdjwf" podUID="8df1e028-d1c7-4b68-b63a-8cc8e762b59d" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.122:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.423907 4877 patch_prober.go:28] interesting pod/controller-manager-678cc9d6c4-99z4w container/controller-manager namespace/openshift-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.424121 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" podUID="46eee1f6-965e-4d6d-a520-77b2f472b164" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.423979 4877 patch_prober.go:28] interesting pod/controller-manager-678cc9d6c4-99z4w container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.424193 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w"
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.424234 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" podUID="46eee1f6-965e-4d6d-a520-77b2f472b164" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.433799 4877 patch_prober.go:28] interesting pod/route-controller-manager-77c8bf88f9-l4t7r container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.58:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.433865 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" podUID="59c5ea7f-f3a1-4fa3-882c-5690f3af3026" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.58:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.433923 4877 patch_prober.go:28] interesting pod/route-controller-manager-77c8bf88f9-l4t7r container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.58:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.433936 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-77c8bf88f9-l4t7r" podUID="59c5ea7f-f3a1-4fa3-882c-5690f3af3026" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.58:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.439008 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="controller-manager" containerStatusID={"Type":"cri-o","ID":"6bcab5713b5621eb7db3cbd28a75712d6d366ef3c644f806b2269be661cd1430"} pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" containerMessage="Container controller-manager failed liveness probe, will be restarted"
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.439071 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" podUID="46eee1f6-965e-4d6d-a520-77b2f472b164" containerName="controller-manager" containerID="cri-o://6bcab5713b5621eb7db3cbd28a75712d6d366ef3c644f806b2269be661cd1430" gracePeriod=30
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.590666 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-qn628" podUID="e25ef10a-92ae-45b2-9467-7f15b523a8a1" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.755696 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c" containerName="galera" probeResult="failure" output="command timed out"
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.755774 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.755703 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c" containerName="galera" probeResult="failure" output="command timed out"
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.758655 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.760114 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="6cf870d4-330c-490b-8fcc-77028d084de4" containerName="galera" probeResult="failure" output="command timed out"
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.760396 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="5b2e9ef8-3e88-4cec-bda6-2c143670f73a" containerName="ceilometer-notification-agent" probeResult="failure" output="command timed out"
Jan 28 18:03:44 crc kubenswrapper[4877]: I0128 18:03:44.760875 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="galera" containerStatusID={"Type":"cri-o","ID":"93c1df4964bdc35d8071628b56d7fb231f1b5bd6b350e5064e4eee566a8433af"} pod="openstack/openstack-cell1-galera-0" containerMessage="Container galera failed liveness probe, will be restarted"
Jan 28 18:03:45 crc kubenswrapper[4877]: I0128 18:03:45.185504 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-msptv"
Jan 28 18:03:45 crc kubenswrapper[4877]: I0128 18:03:45.186294 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-msptv"
Jan 28 18:03:45 crc kubenswrapper[4877]: I0128 18:03:45.755924 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/prometheus-k8s-0" podUID="5fbd26ca-eb13-4e63-b055-3ee514dbcea6" containerName="prometheus" probeResult="failure" output="command timed out"
Jan 28 18:03:45 crc kubenswrapper[4877]: I0128 18:03:45.756426 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c" containerName="galera" probeResult="failure" output="command timed out"
Jan 28 18:03:45 crc kubenswrapper[4877]: I0128 18:03:45.756425 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-k8s-0" podUID="5fbd26ca-eb13-4e63-b055-3ee514dbcea6" containerName="prometheus" probeResult="failure" output="command timed out"
Jan 28 18:03:45 crc kubenswrapper[4877]: I0128 18:03:45.836185 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-s7mdg" podUID="1f8e6a17-6325-49ef-88be-7be71a431bd9" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:45 crc kubenswrapper[4877]: I0128 18:03:45.836282 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/speaker-s7mdg" podUID="1f8e6a17-6325-49ef-88be-7be71a431bd9" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:45 crc kubenswrapper[4877]: I0128 18:03:45.993431 4877 patch_prober.go:28] interesting pod/logging-loki-distributor-5f678c8dd6-pl69c container/loki-distributor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.51:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:45 crc kubenswrapper[4877]: I0128 18:03:45.993510 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-distributor-5f678c8dd6-pl69c" podUID="8edb732d-043a-4fbf-b0b1-da98fdaa9a84" containerName="loki-distributor" probeResult="failure" output="Get \"https://10.217.0.51:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.246713 4877 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-ztt5t container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.62:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.246786 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" podUID="e4d169b3-a547-428e-b407-ea1a018f7a36" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.62:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.246738 4877 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-ztt5t container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.62:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.247079 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-ztt5t" podUID="e4d169b3-a547-428e-b407-ea1a018f7a36" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.62:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.401202 4877 patch_prober.go:28] interesting pod/logging-loki-query-frontend-69d9546745-wkbs4 container/loki-query-frontend namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.53:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.401627 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-query-frontend-69d9546745-wkbs4" podUID="ea81a55a-52f7-471b-bff8-9b49e05d459a" containerName="loki-query-frontend" probeResult="failure" output="Get \"https://10.217.0.53:3101/loki/api/v1/status/buildinfo\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.503338 4877 patch_prober.go:28] interesting pod/logging-loki-querier-76788598db-8zdq7 container/loki-querier namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.503436 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-querier-76788598db-8zdq7" podUID="094adba0-094e-453d-87f4-b2098f9fe680" containerName="loki-querier" probeResult="failure" output="Get \"https://10.217.0.52:3101/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.508301 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-index-sgvqs" podUID="2921bb76-4308-4082-ab32-4dc817ccac74" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:03:46 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:03:46 crc kubenswrapper[4877]: >
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.508343 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-index-sgvqs" podUID="2921bb76-4308-4082-ab32-4dc817ccac74" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:03:46 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:03:46 crc kubenswrapper[4877]: >
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.508439 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-sgvqs"
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.508468 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-index-sgvqs"
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.554622 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"a6df0d223a06a0f899582cecbcbd0f364e720e9e67c561a2920512512fe20af6"} pod="openstack-operators/openstack-operator-index-sgvqs" containerMessage="Container registry-server failed liveness probe, will be restarted"
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.554701 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-sgvqs" podUID="2921bb76-4308-4082-ab32-4dc817ccac74" containerName="registry-server" containerID="cri-o://a6df0d223a06a0f899582cecbcbd0f364e720e9e67c561a2920512512fe20af6" gracePeriod=30
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.828240 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-msptv" podUID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:03:46 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:03:46 crc kubenswrapper[4877]: >
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.852776 4877 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pwhsx container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.852861 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx" podUID="17940ca1-0215-4491-a9f9-9177b04180d5" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.853122 4877 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-pwhsx container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.853176 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-pwhsx" podUID="17940ca1-0215-4491-a9f9-9177b04180d5" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.981514 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" event={"ID":"46eee1f6-965e-4d6d-a520-77b2f472b164","Type":"ContainerDied","Data":"6bcab5713b5621eb7db3cbd28a75712d6d366ef3c644f806b2269be661cd1430"}
Jan 28 18:03:46 crc kubenswrapper[4877]: I0128 18:03:46.982325 4877 generic.go:334] "Generic (PLEG): container finished" podID="46eee1f6-965e-4d6d-a520-77b2f472b164" containerID="6bcab5713b5621eb7db3cbd28a75712d6d366ef3c644f806b2269be661cd1430" exitCode=0
Jan 28 18:03:47 crc kubenswrapper[4877]: I0128 18:03:47.106743 4877 patch_prober.go:28] interesting pod/metrics-server-fbbd74554-qkt8l container/metrics-server namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.75:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:47 crc kubenswrapper[4877]: I0128 18:03:47.107194 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" podUID="7829fe04-318e-4cda-adb5-4109e6d6f751" containerName="metrics-server" probeResult="failure" output="Get \"https://10.217.0.75:10250/livez\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:47 crc kubenswrapper[4877]: I0128 18:03:47.107357 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l"
Jan 28 18:03:47 crc kubenswrapper[4877]: I0128 18:03:47.130371 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="metrics-server" containerStatusID={"Type":"cri-o","ID":"37722aa48435b88a0829b19a19182148c36d2f40151c17a01240122654767edf"} pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" containerMessage="Container metrics-server failed liveness probe, will be restarted"
Jan 28 18:03:47 crc kubenswrapper[4877]: I0128 18:03:47.130695 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" podUID="7829fe04-318e-4cda-adb5-4109e6d6f751" containerName="metrics-server" containerID="cri-o://37722aa48435b88a0829b19a19182148c36d2f40151c17a01240122654767edf" gracePeriod=170
Jan 28 18:03:47 crc kubenswrapper[4877]: I0128 18:03:47.203704 4877 patch_prober.go:28] interesting pod/nmstate-webhook-8474b5b9d8-mh62n container/nmstate-webhook namespace/openshift-nmstate: Readiness probe status=failure output="Get \"https://10.217.0.88:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:47 crc kubenswrapper[4877]: I0128 18:03:47.203797 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mh62n" podUID="74e879cb-e2db-473a-9248-112f04f25fa5" containerName="nmstate-webhook" probeResult="failure" output="Get \"https://10.217.0.88:9443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:47 crc kubenswrapper[4877]: I0128 18:03:47.325446 4877 trace.go:236] Trace[1903491940]: "Calculate volume metrics of persistence for pod openstack/rabbitmq-cell1-server-0" (28-Jan-2026 18:03:45.179) (total time: 2138ms):
Jan 28 18:03:47 crc kubenswrapper[4877]: Trace[1903491940]: [2.138988862s] [2.138988862s] END
Jan 28 18:03:47 crc kubenswrapper[4877]: I0128 18:03:47.457923 4877 patch_prober.go:28] interesting pod/monitoring-plugin-595f97fc4c-4kdp8 container/monitoring-plugin namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.76:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:47 crc kubenswrapper[4877]: I0128 18:03:47.457983 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/monitoring-plugin-595f97fc4c-4kdp8" podUID="e831d929-583c-4da5-8ab2-27d484da84b2" containerName="monitoring-plugin" probeResult="failure" output="Get \"https://10.217.0.76:9443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:47 crc kubenswrapper[4877]: I0128 18:03:47.976825 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="6cf870d4-330c-490b-8fcc-77028d084de4" containerName="galera" containerID="cri-o://cc3e75950cda499aa563bbf84f763f05b9b143b4b68aa175824f43392e681d61" gracePeriod=26
Jan 28 18:03:47 crc kubenswrapper[4877]: I0128 18:03:47.978616 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c" containerName="galera" containerID="cri-o://93c1df4964bdc35d8071628b56d7fb231f1b5bd6b350e5064e4eee566a8433af" gracePeriod=27
Jan 28 18:03:48 crc kubenswrapper[4877]: I0128 18:03:48.021711 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" podUID="2e00aa5f-0d94-48bb-9802-cfff5c46490f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:48 crc kubenswrapper[4877]: I0128 18:03:48.021784 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" podUID="2e00aa5f-0d94-48bb-9802-cfff5c46490f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:48 crc kubenswrapper[4877]: I0128 18:03:48.021852 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb"
Jan 28 18:03:48 crc kubenswrapper[4877]: I0128 18:03:48.352133 4877 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Liveness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": context deadline exceeded" start-of-body=
Jan 28 18:03:48 crc kubenswrapper[4877]: I0128 18:03:48.352225 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": context deadline exceeded"
Jan 28 18:03:48 crc kubenswrapper[4877]: I0128 18:03:48.448276 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-index-sgvqs" podUID="2921bb76-4308-4082-ab32-4dc817ccac74" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:03:48 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:03:48 crc kubenswrapper[4877]: >
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.052266 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" event={"ID":"46eee1f6-965e-4d6d-a520-77b2f472b164","Type":"ContainerStarted","Data":"4e2668d3a66d59b7279d0cf00e894a48280dd919f78c8600960c1b2503846939"}
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.052435 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w"
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.055526 4877 generic.go:334] "Generic (PLEG): container finished" podID="2921bb76-4308-4082-ab32-4dc817ccac74" containerID="a6df0d223a06a0f899582cecbcbd0f364e720e9e67c561a2920512512fe20af6" exitCode=0
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.055564 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-sgvqs" event={"ID":"2921bb76-4308-4082-ab32-4dc817ccac74","Type":"ContainerDied","Data":"a6df0d223a06a0f899582cecbcbd0f364e720e9e67c561a2920512512fe20af6"}
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.064776 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" podUID="2e00aa5f-0d94-48bb-9802-cfff5c46490f" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.065461 4877 patch_prober.go:28] interesting pod/controller-manager-678cc9d6c4-99z4w container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" start-of-body=
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.065526 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" podUID="46eee1f6-965e-4d6d-a520-77b2f472b164" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused"
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.492638 4877 patch_prober.go:28] interesting pod/console-operator-58897d9998-gb5km container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.492975 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-gb5km" podUID="7725b803-8d54-401d-bb4c-4112e90ddc0b" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.492689 4877 patch_prober.go:28] interesting pod/console-operator-58897d9998-gb5km container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.493051 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-gb5km" podUID="7725b803-8d54-401d-bb4c-4112e90ddc0b" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.492728 4877 patch_prober.go:28] interesting pod/authentication-operator-69f744f599-q9rx2 container/authentication-operator namespace/openshift-authentication-operator: Liveness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.493119 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication-operator/authentication-operator-69f744f599-q9rx2" podUID="92f14a91-e41e-4b81-bec5-ea6cf4a3037a" containerName="authentication-operator" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.542526 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="327e7593-5623-475c-ad8f-2456a437a645" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.759897 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="5b2e9ef8-3e88-4cec-bda6-2c143670f73a" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out"
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.911635 4877 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wnxxp container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.911673 4877 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wnxxp container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.36:5443/healthz\": context deadline exceeded" start-of-body=
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.911707 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp" podUID="fbd8dc94-00b1-4aff-a395-72702a0db6c1" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.36:5443/healthz\": context deadline exceeded"
Jan 28 18:03:49 crc kubenswrapper[4877]: I0128 18:03:49.911706 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnxxp" podUID="fbd8dc94-00b1-4aff-a395-72702a0db6c1" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.36:5443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:50 crc kubenswrapper[4877]: I0128 18:03:50.037732 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="cert-manager/cert-manager-webhook-687f57d79b-42v7p" podUID="c963fe29-366b-4362-9a13-89423728237d" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.44:6080/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:50 crc kubenswrapper[4877]: I0128 18:03:50.037721 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="cert-manager/cert-manager-webhook-687f57d79b-42v7p" podUID="c963fe29-366b-4362-9a13-89423728237d" containerName="cert-manager-webhook" probeResult="failure" output="Get \"http://10.217.0.44:6080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:50 crc kubenswrapper[4877]: I0128 18:03:50.074272 4877 patch_prober.go:28] interesting pod/controller-manager-678cc9d6c4-99z4w container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" start-of-body=
Jan 28 18:03:50 crc kubenswrapper[4877]: I0128 18:03:50.074332 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" podUID="46eee1f6-965e-4d6d-a520-77b2f472b164" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused"
Jan 28 18:03:50 crc kubenswrapper[4877]: I0128 18:03:50.489947 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:50 crc kubenswrapper[4877]: I0128 18:03:50.490335 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:50 crc kubenswrapper[4877]: I0128 18:03:50.490874 4877 patch_prober.go:28] interesting pod/router-default-5444994796-vbvr6 container/router namespace/openshift-ingress: Liveness probe status=failure output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:50 crc kubenswrapper[4877]: I0128 18:03:50.490898 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-ingress/router-default-5444994796-vbvr6" podUID="0e667586-4bc0-4e00-9aec-fac9ad2b49ca" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:50 crc kubenswrapper[4877]: I0128 18:03:50.564619 4877 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-n9qwr container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:50 crc kubenswrapper[4877]: I0128 18:03:50.564682 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" podUID="362f8c1a-4938-4ee4-853b-8f868147d732" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:50 crc kubenswrapper[4877]: I0128 18:03:50.564735 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr"
Jan 28 18:03:50 crc kubenswrapper[4877]: I0128 18:03:50.568823 4877 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-n9qwr container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:50 crc kubenswrapper[4877]: I0128 18:03:50.568875 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" podUID="362f8c1a-4938-4ee4-853b-8f868147d732" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:50 crc kubenswrapper[4877]: I0128 18:03:50.568979 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr"
Jan 28 18:03:50 crc kubenswrapper[4877]: I0128 18:03:50.575950 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="olm-operator" containerStatusID={"Type":"cri-o","ID":"f4a1bb9d723db085fcc9bd2da9fed57cbcb72f32292466b36b2fa05977e915ee"} pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" containerMessage="Container olm-operator failed liveness probe, will be restarted"
Jan 28 18:03:50 crc kubenswrapper[4877]: I0128 18:03:50.576016 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" podUID="362f8c1a-4938-4ee4-853b-8f868147d732" containerName="olm-operator" containerID="cri-o://f4a1bb9d723db085fcc9bd2da9fed57cbcb72f32292466b36b2fa05977e915ee" gracePeriod=30
Jan 28 18:03:51 crc kubenswrapper[4877]: E0128 18:03:51.428756 4877 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod362f8c1a_4938_4ee4_853b_8f868147d732.slice/crio-f4a1bb9d723db085fcc9bd2da9fed57cbcb72f32292466b36b2fa05977e915ee.scope\": RecentStats: unable to find data in memory cache]"
Jan 28 18:03:51 crc kubenswrapper[4877]: E0128 18:03:51.909755 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a6df0d223a06a0f899582cecbcbd0f364e720e9e67c561a2920512512fe20af6 is running failed: container process not found" containerID="a6df0d223a06a0f899582cecbcbd0f364e720e9e67c561a2920512512fe20af6" cmd=["grpc_health_probe","-addr=:50051"]
Jan 28 18:03:51 crc kubenswrapper[4877]: E0128 18:03:51.910404 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a6df0d223a06a0f899582cecbcbd0f364e720e9e67c561a2920512512fe20af6 is running failed: container process not found" containerID="a6df0d223a06a0f899582cecbcbd0f364e720e9e67c561a2920512512fe20af6" cmd=["grpc_health_probe","-addr=:50051"]
Jan 28 18:03:51 crc kubenswrapper[4877]: E0128 18:03:51.910764 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a6df0d223a06a0f899582cecbcbd0f364e720e9e67c561a2920512512fe20af6 is running failed: container process not found" containerID="a6df0d223a06a0f899582cecbcbd0f364e720e9e67c561a2920512512fe20af6" cmd=["grpc_health_probe","-addr=:50051"]
Jan 28 18:03:51 crc kubenswrapper[4877]: E0128 18:03:51.910797 4877 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a6df0d223a06a0f899582cecbcbd0f364e720e9e67c561a2920512512fe20af6 is running failed: container process not found" probeType="Readiness" pod="openstack-operators/openstack-operator-index-sgvqs" podUID="2921bb76-4308-4082-ab32-4dc817ccac74" containerName="registry-server"
Jan 28 18:03:52 crc kubenswrapper[4877]: I0128 18:03:52.010579 4877 patch_prober.go:28] interesting pod/oauth-openshift-7448d7568b-r4ttp container/oauth-openshift namespace/openshift-authentication: Liveness probe status=failure output="Get \"https://10.217.0.56:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:52 crc kubenswrapper[4877]: I0128 18:03:52.010649 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" podUID="eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.56:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:52 crc kubenswrapper[4877]: I0128 18:03:52.010920 4877 patch_prober.go:28] interesting pod/oauth-openshift-7448d7568b-r4ttp container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.56:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:03:52 crc kubenswrapper[4877]: I0128 18:03:52.010990 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-7448d7568b-r4ttp" podUID="eb0f9ed6-dd3e-4abc-9a57-d9e5c4be0115" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.56:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:03:52 crc kubenswrapper[4877]: I0128 18:03:52.101934 4877 generic.go:334] "Generic (PLEG): container finished" podID="362f8c1a-4938-4ee4-853b-8f868147d732" containerID="f4a1bb9d723db085fcc9bd2da9fed57cbcb72f32292466b36b2fa05977e915ee" exitCode=0
Jan 28 18:03:52 crc kubenswrapper[4877]: I0128 18:03:52.102189 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" event={"ID":"362f8c1a-4938-4ee4-853b-8f868147d732","Type":"ContainerDied","Data":"f4a1bb9d723db085fcc9bd2da9fed57cbcb72f32292466b36b2fa05977e915ee"}
Jan 28 18:03:52 crc kubenswrapper[4877]: E0128 18:03:52.341952 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cc3e75950cda499aa563bbf84f763f05b9b143b4b68aa175824f43392e681d61" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Jan 28 18:03:52 crc kubenswrapper[4877]: I0128 18:03:52.344218 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="327e7593-5623-475c-ad8f-2456a437a645" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 18:03:52 crc kubenswrapper[4877]: E0128 18:03:52.348729 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cc3e75950cda499aa563bbf84f763f05b9b143b4b68aa175824f43392e681d61" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Jan 28 18:03:52 crc kubenswrapper[4877]: E0128 18:03:52.356038 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cc3e75950cda499aa563bbf84f763f05b9b143b4b68aa175824f43392e681d61" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Jan 28 18:03:52 crc kubenswrapper[4877]: E0128 18:03:52.356096 4877 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="6cf870d4-330c-490b-8fcc-77028d084de4" containerName="galera"
Jan 28 18:03:53 crc kubenswrapper[4877]: I0128 18:03:53.116622 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-sgvqs" event={"ID":"2921bb76-4308-4082-ab32-4dc817ccac74","Type":"ContainerStarted","Data":"eafdb015acf81bb499c1d6fa544460ca817063d5b4e8c5a6ee1d89bc49a7cb45"}
Jan 28 18:03:53 crc kubenswrapper[4877]: I0128 18:03:53.124143 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" event={"ID":"362f8c1a-4938-4ee4-853b-8f868147d732","Type":"ContainerStarted","Data":"da38296dfac4c43b890bbd9595bb06f0285f994b94f8d6f73953232f230c6a5e"}
Jan 28 18:03:53 crc kubenswrapper[4877]: I0128 18:03:53.125778 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr"
Jan 28 18:03:53 crc kubenswrapper[4877]: I0128 18:03:53.125853 4877 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-n9qwr container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" start-of-body=
Jan 28 18:03:53 crc kubenswrapper[4877]: I0128 18:03:53.125887 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" podUID="362f8c1a-4938-4ee4-853b-8f868147d732" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused"
Jan 28 18:03:53 crc kubenswrapper[4877]: I0128 18:03:53.128641 4877 generic.go:334] "Generic (PLEG): container finished" podID="5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c" containerID="93c1df4964bdc35d8071628b56d7fb231f1b5bd6b350e5064e4eee566a8433af" exitCode=0
Jan 28 18:03:53 crc kubenswrapper[4877]: I0128 18:03:53.128693 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c","Type":"ContainerDied","Data":"93c1df4964bdc35d8071628b56d7fb231f1b5bd6b350e5064e4eee566a8433af"}
Jan 28 18:03:53 crc kubenswrapper[4877]: I0128 18:03:53.424681 4877 patch_prober.go:28] interesting pod/controller-manager-678cc9d6c4-99z4w container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" start-of-body=
Jan 28 18:03:53 crc kubenswrapper[4877]: I0128 18:03:53.424764 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w" podUID="46eee1f6-965e-4d6d-a520-77b2f472b164" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused"
Jan 28 18:03:53 crc kubenswrapper[4877]: E0128 18:03:53.548470 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 93c1df4964bdc35d8071628b56d7fb231f1b5bd6b350e5064e4eee566a8433af is running failed: container process not found" containerID="93c1df4964bdc35d8071628b56d7fb231f1b5bd6b350e5064e4eee566a8433af" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Jan 28 18:03:53 crc kubenswrapper[4877]: E0128 18:03:53.549346 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 93c1df4964bdc35d8071628b56d7fb231f1b5bd6b350e5064e4eee566a8433af is running failed: container process not found" containerID="93c1df4964bdc35d8071628b56d7fb231f1b5bd6b350e5064e4eee566a8433af" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Jan 28 18:03:53 crc kubenswrapper[4877]: E0128 18:03:53.549752 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 93c1df4964bdc35d8071628b56d7fb231f1b5bd6b350e5064e4eee566a8433af is running failed: container process not found" containerID="93c1df4964bdc35d8071628b56d7fb231f1b5bd6b350e5064e4eee566a8433af" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Jan 28 18:03:53 crc kubenswrapper[4877]: E0128 18:03:53.549823 4877 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 93c1df4964bdc35d8071628b56d7fb231f1b5bd6b350e5064e4eee566a8433af is running failed: container process not found" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c" containerName="galera"
Jan 28 18:03:54 crc kubenswrapper[4877]: I0128 18:03:54.142509 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c","Type":"ContainerStarted","Data":"054727f6cb467a627d38bba4aace98222046762f6e42c7c3708ff52e14e307df"}
Jan 28 18:03:54 crc kubenswrapper[4877]: I0128 18:03:54.143441 4877 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-n9qwr container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" start-of-body=
Jan 28 18:03:54 crc kubenswrapper[4877]: I0128 18:03:54.143528 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" podUID="362f8c1a-4938-4ee4-853b-8f868147d732" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused"
Jan 28 18:03:55 crc kubenswrapper[4877]: I0128 18:03:55.191010 4877 generic.go:334] "Generic (PLEG): container finished" podID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" containerID="843742a09a04c38955284b002724373ecec2529696eb42253bf00f81d1a325ef" exitCode=0
Jan 28 18:03:55 crc kubenswrapper[4877]: I0128 18:03:55.191103 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vs7bc" event={"ID":"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9","Type":"ContainerDied","Data":"843742a09a04c38955284b002724373ecec2529696eb42253bf00f81d1a325ef"}
Jan 28 18:03:55 crc kubenswrapper[4877]: I0128 18:03:55.209515 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9lmsk_62733c9a-12dc-4bfc-88c7-06b6cec26fc3/registry-server/1.log"
Jan 28 18:03:55 crc kubenswrapper[4877]: I0128 18:03:55.213604 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9lmsk_62733c9a-12dc-4bfc-88c7-06b6cec26fc3/registry-server/0.log"
Jan 28 18:03:55 crc kubenswrapper[4877]: I0128 18:03:55.214552 4877 generic.go:334] "Generic (PLEG): container finished" podID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3"
containerID="1d6b80ce5f7fb85778eeff2a24ed478861089cbb66c8347538a0400dc8d2a031" exitCode=137 Jan 28 18:03:55 crc kubenswrapper[4877]: I0128 18:03:55.215932 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lmsk" event={"ID":"62733c9a-12dc-4bfc-88c7-06b6cec26fc3","Type":"ContainerDied","Data":"1d6b80ce5f7fb85778eeff2a24ed478861089cbb66c8347538a0400dc8d2a031"} Jan 28 18:03:55 crc kubenswrapper[4877]: I0128 18:03:55.229417 4877 scope.go:117] "RemoveContainer" containerID="24f13f5541ec41b3d46499d7abef67bd3697d72343bff75dad48f6a125abd07d" Jan 28 18:03:55 crc kubenswrapper[4877]: I0128 18:03:55.258576 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-n9qwr" Jan 28 18:03:55 crc kubenswrapper[4877]: I0128 18:03:55.398414 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="327e7593-5623-475c-ad8f-2456a437a645" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 18:03:55 crc kubenswrapper[4877]: I0128 18:03:55.398769 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 28 18:03:55 crc kubenswrapper[4877]: I0128 18:03:55.401925 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="cinder-scheduler" containerStatusID={"Type":"cri-o","ID":"f772389235174d1da19b8526fef28b2c4ba0985cd362c88612ef554252adda2c"} pod="openstack/cinder-scheduler-0" containerMessage="Container cinder-scheduler failed liveness probe, will be restarted" Jan 28 18:03:55 crc kubenswrapper[4877]: I0128 18:03:55.402005 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="327e7593-5623-475c-ad8f-2456a437a645" containerName="cinder-scheduler" containerID="cri-o://f772389235174d1da19b8526fef28b2c4ba0985cd362c88612ef554252adda2c" gracePeriod=30 Jan 28 18:03:56 crc kubenswrapper[4877]: I0128 18:03:56.261958 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9lmsk_62733c9a-12dc-4bfc-88c7-06b6cec26fc3/registry-server/1.log" Jan 28 18:03:56 crc kubenswrapper[4877]: I0128 18:03:56.330226 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-msptv" podUID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerName="registry-server" probeResult="failure" output=< Jan 28 18:03:56 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:03:56 crc kubenswrapper[4877]: > Jan 28 18:03:56 crc kubenswrapper[4877]: I0128 18:03:56.943520 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b8548zvdb" Jan 28 18:03:57 crc kubenswrapper[4877]: I0128 18:03:57.296184 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9lmsk_62733c9a-12dc-4bfc-88c7-06b6cec26fc3/registry-server/1.log" Jan 28 18:03:57 crc kubenswrapper[4877]: I0128 18:03:57.298808 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lmsk" event={"ID":"62733c9a-12dc-4bfc-88c7-06b6cec26fc3","Type":"ContainerStarted","Data":"c1ddd1231e4a1ac3cb2a2cf11594cb774e305c83273aa52b1b86e9fe34f15366"} Jan 28 18:03:57 crc kubenswrapper[4877]: I0128 18:03:57.305273 4877 kubelet.go:2453] "SyncLoop (PLEG): 
Jan 28 18:03:57 crc kubenswrapper[4877]: I0128 18:03:57.305273 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vs7bc" event={"ID":"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9","Type":"ContainerStarted","Data":"3e7f3f24d2bbe3dde061c0b2653167afd0c4834cbfb1d5de426c37b417dc0c78"}
Jan 28 18:03:57 crc kubenswrapper[4877]: I0128 18:03:57.357945 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vs7bc" podStartSLOduration=17.316241101 podStartE2EDuration="1m14.357911503s" podCreationTimestamp="2026-01-28 18:02:43 +0000 UTC" firstStartedPulling="2026-01-28 18:02:59.311424555 +0000 UTC m=+5282.869751443" lastFinishedPulling="2026-01-28 18:03:56.353094947 +0000 UTC m=+5339.911421845" observedRunningTime="2026-01-28 18:03:57.353775932 +0000 UTC m=+5340.912102840" watchObservedRunningTime="2026-01-28 18:03:57.357911503 +0000 UTC m=+5340.916238391"
Jan 28 18:04:01 crc kubenswrapper[4877]: I0128 18:04:01.892584 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-sgvqs"
Jan 28 18:04:01 crc kubenswrapper[4877]: I0128 18:04:01.893582 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-sgvqs"
Jan 28 18:04:01 crc kubenswrapper[4877]: I0128 18:04:01.900924 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 18:04:01 crc kubenswrapper[4877]: I0128 18:04:01.901247 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 18:04:02 crc kubenswrapper[4877]: E0128 18:04:02.342338 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cc3e75950cda499aa563bbf84f763f05b9b143b4b68aa175824f43392e681d61" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Jan 28 18:04:02 crc kubenswrapper[4877]: E0128 18:04:02.344772 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cc3e75950cda499aa563bbf84f763f05b9b143b4b68aa175824f43392e681d61" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Jan 28 18:04:02 crc kubenswrapper[4877]: E0128 18:04:02.346377 4877 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cc3e75950cda499aa563bbf84f763f05b9b143b4b68aa175824f43392e681d61" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Jan 28 18:04:02 crc kubenswrapper[4877]: E0128 18:04:02.346414 4877 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="6cf870d4-330c-490b-8fcc-77028d084de4" containerName="galera"
Jan 28 18:04:02 crc kubenswrapper[4877]: I0128 18:04:02.443267 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-sgvqs"
Jan 28 18:04:02 crc kubenswrapper[4877]: I0128 18:04:02.531607 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-sgvqs"
Jan 28 18:04:02 crc kubenswrapper[4877]: I0128 18:04:02.965149 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:04:02 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:04:02 crc kubenswrapper[4877]: >
Jan 28 18:04:03 crc kubenswrapper[4877]: I0128 18:04:03.415182 4877 generic.go:334] "Generic (PLEG): container finished" podID="6cf870d4-330c-490b-8fcc-77028d084de4" containerID="cc3e75950cda499aa563bbf84f763f05b9b143b4b68aa175824f43392e681d61" exitCode=0
Jan 28 18:04:03 crc kubenswrapper[4877]: I0128 18:04:03.415293 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"6cf870d4-330c-490b-8fcc-77028d084de4","Type":"ContainerDied","Data":"cc3e75950cda499aa563bbf84f763f05b9b143b4b68aa175824f43392e681d61"}
Jan 28 18:04:03 crc kubenswrapper[4877]: I0128 18:04:03.428061 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-678cc9d6c4-99z4w"
Jan 28 18:04:03 crc kubenswrapper[4877]: I0128 18:04:03.547392 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Jan 28 18:04:03 crc kubenswrapper[4877]: I0128 18:04:03.547447 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Jan 28 18:04:05 crc kubenswrapper[4877]: I0128 18:04:05.199166 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vs7bc"
Jan 28 18:04:05 crc kubenswrapper[4877]: I0128 18:04:05.199568 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vs7bc"
Jan 28 18:04:05 crc kubenswrapper[4877]: I0128 18:04:05.446933 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"6cf870d4-330c-490b-8fcc-77028d084de4","Type":"ContainerStarted","Data":"4108b41100320bb852cbcca548bb93dd2aa7f4cef38a5ddf7b847e7724d5b5f7"}
Jan 28 18:04:06 crc kubenswrapper[4877]: I0128 18:04:06.238267 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-msptv" podUID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:04:06 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:04:06 crc kubenswrapper[4877]: >
Jan 28 18:04:06 crc kubenswrapper[4877]: I0128 18:04:06.272763 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-vs7bc" podUID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:04:06 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:04:06 crc kubenswrapper[4877]: >
Jan 28 18:04:06 crc kubenswrapper[4877]: I0128 18:04:06.462169 4877 generic.go:334] "Generic (PLEG): container finished" podID="327e7593-5623-475c-ad8f-2456a437a645" containerID="f772389235174d1da19b8526fef28b2c4ba0985cd362c88612ef554252adda2c" exitCode=0
Jan 28 18:04:06 crc kubenswrapper[4877]: I0128 18:04:06.462250 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"327e7593-5623-475c-ad8f-2456a437a645","Type":"ContainerDied","Data":"f772389235174d1da19b8526fef28b2c4ba0985cd362c88612ef554252adda2c"}
Jan 28 18:04:09 crc kubenswrapper[4877]: I0128 18:04:09.455970 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Jan 28 18:04:09 crc kubenswrapper[4877]: I0128 18:04:09.709727 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Jan 28 18:04:12 crc kubenswrapper[4877]: I0128 18:04:12.340284 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Jan 28 18:04:12 crc kubenswrapper[4877]: I0128 18:04:12.340875 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Jan 28 18:04:12 crc kubenswrapper[4877]: I0128 18:04:12.649098 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Jan 28 18:04:12 crc kubenswrapper[4877]: I0128 18:04:12.811198 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Jan 28 18:04:13 crc kubenswrapper[4877]: I0128 18:04:13.549607 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"327e7593-5623-475c-ad8f-2456a437a645","Type":"ContainerStarted","Data":"2225e47f6937d35541903b4427308985a95dbab2f07e14219b046de21d15e1b5"}
Jan 28 18:04:13 crc kubenswrapper[4877]: I0128 18:04:13.806936 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:04:13 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:04:13 crc kubenswrapper[4877]: >
Jan 28 18:04:16 crc kubenswrapper[4877]: I0128 18:04:16.241368 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-msptv" podUID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:04:16 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:04:16 crc kubenswrapper[4877]: >
Jan 28 18:04:16 crc kubenswrapper[4877]: I0128 18:04:16.254659 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-vs7bc" podUID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:04:16 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:04:16 crc kubenswrapper[4877]: >
Jan 28 18:04:16 crc kubenswrapper[4877]: I0128 18:04:16.312183 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Jan 28 18:04:21 crc kubenswrapper[4877]: I0128 18:04:21.391585 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Jan 28 18:04:22 crc kubenswrapper[4877]: I0128 18:04:22.967197 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:04:22 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:04:22 crc kubenswrapper[4877]: >
Jan 28 18:04:26 crc kubenswrapper[4877]: I0128 18:04:26.245193 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-msptv" podUID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:04:26 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:04:26 crc kubenswrapper[4877]: >
Jan 28 18:04:26 crc kubenswrapper[4877]: I0128 18:04:26.257274 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-vs7bc" podUID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:04:26 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:04:26 crc kubenswrapper[4877]: >
Jan 28 18:04:32 crc kubenswrapper[4877]: I0128 18:04:32.954204 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:04:32 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:04:32 crc kubenswrapper[4877]: >
Jan 28 18:04:36 crc kubenswrapper[4877]: I0128 18:04:36.240043 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-msptv" podUID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:04:36 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:04:36 crc kubenswrapper[4877]: >
Jan 28 18:04:36 crc kubenswrapper[4877]: I0128 18:04:36.247016 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-vs7bc" podUID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:04:36 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:04:36 crc kubenswrapper[4877]: >
Jan 28 18:04:42 crc kubenswrapper[4877]: I0128 18:04:42.963367 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:04:42 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:04:42 crc kubenswrapper[4877]: >
Jan 28 18:04:45 crc kubenswrapper[4877]: I0128 18:04:45.467134 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-msptv"
Jan 28 18:04:45 crc kubenswrapper[4877]: I0128 18:04:45.516597 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-msptv"
Jan 28 18:04:45 crc kubenswrapper[4877]: I0128 18:04:45.802617 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-msptv"]
Jan 28 18:04:46 crc kubenswrapper[4877]: I0128 18:04:46.253433 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-vs7bc" podUID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:04:46 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:04:46 crc kubenswrapper[4877]: >
Jan 28 18:04:46 crc kubenswrapper[4877]: I0128 18:04:46.975790 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-msptv" podUID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerName="registry-server" containerID="cri-o://a00223510ff917981513137208e2d8c83bc1cdacee9a730a724ff2185d58f5df" gracePeriod=2
Jan 28 18:04:47 crc kubenswrapper[4877]: I0128 18:04:47.991916 4877 generic.go:334] "Generic (PLEG): container finished" podID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerID="a00223510ff917981513137208e2d8c83bc1cdacee9a730a724ff2185d58f5df" exitCode=0
Jan 28 18:04:47 crc kubenswrapper[4877]: I0128 18:04:47.992006 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msptv" event={"ID":"88ff251b-c893-4ea0-baa4-7d69abc30ac1","Type":"ContainerDied","Data":"a00223510ff917981513137208e2d8c83bc1cdacee9a730a724ff2185d58f5df"}
Jan 28 18:04:52 crc kubenswrapper[4877]: I0128 18:04:52.643223 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-msptv"
Jan 28 18:04:52 crc kubenswrapper[4877]: I0128 18:04:52.829304 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88ff251b-c893-4ea0-baa4-7d69abc30ac1-utilities\") pod \"88ff251b-c893-4ea0-baa4-7d69abc30ac1\" (UID: \"88ff251b-c893-4ea0-baa4-7d69abc30ac1\") "
Jan 28 18:04:52 crc kubenswrapper[4877]: I0128 18:04:52.829535 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88ff251b-c893-4ea0-baa4-7d69abc30ac1-catalog-content\") pod \"88ff251b-c893-4ea0-baa4-7d69abc30ac1\" (UID: \"88ff251b-c893-4ea0-baa4-7d69abc30ac1\") "
Jan 28 18:04:52 crc kubenswrapper[4877]: I0128 18:04:52.829613 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zx9lc\" (UniqueName: \"kubernetes.io/projected/88ff251b-c893-4ea0-baa4-7d69abc30ac1-kube-api-access-zx9lc\") pod \"88ff251b-c893-4ea0-baa4-7d69abc30ac1\" (UID: \"88ff251b-c893-4ea0-baa4-7d69abc30ac1\") "
Jan 28 18:04:52 crc kubenswrapper[4877]: I0128 18:04:52.839830 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88ff251b-c893-4ea0-baa4-7d69abc30ac1-utilities" (OuterVolumeSpecName: "utilities") pod "88ff251b-c893-4ea0-baa4-7d69abc30ac1" (UID: "88ff251b-c893-4ea0-baa4-7d69abc30ac1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:04:52 crc kubenswrapper[4877]: I0128 18:04:52.863237 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88ff251b-c893-4ea0-baa4-7d69abc30ac1-kube-api-access-zx9lc" (OuterVolumeSpecName: "kube-api-access-zx9lc") pod "88ff251b-c893-4ea0-baa4-7d69abc30ac1" (UID: "88ff251b-c893-4ea0-baa4-7d69abc30ac1"). InnerVolumeSpecName "kube-api-access-zx9lc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:04:52 crc kubenswrapper[4877]: I0128 18:04:52.887286 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88ff251b-c893-4ea0-baa4-7d69abc30ac1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "88ff251b-c893-4ea0-baa4-7d69abc30ac1" (UID: "88ff251b-c893-4ea0-baa4-7d69abc30ac1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:04:52 crc kubenswrapper[4877]: I0128 18:04:52.932215 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88ff251b-c893-4ea0-baa4-7d69abc30ac1-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 18:04:52 crc kubenswrapper[4877]: I0128 18:04:52.932573 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zx9lc\" (UniqueName: \"kubernetes.io/projected/88ff251b-c893-4ea0-baa4-7d69abc30ac1-kube-api-access-zx9lc\") on node \"crc\" DevicePath \"\""
Jan 28 18:04:52 crc kubenswrapper[4877]: I0128 18:04:52.932594 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88ff251b-c893-4ea0-baa4-7d69abc30ac1-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 18:04:52 crc kubenswrapper[4877]: I0128 18:04:52.970322 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:04:52 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:04:52 crc kubenswrapper[4877]: >
Jan 28 18:04:53 crc kubenswrapper[4877]: I0128 18:04:53.045402 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msptv" event={"ID":"88ff251b-c893-4ea0-baa4-7d69abc30ac1","Type":"ContainerDied","Data":"c185fc89021dfe66164c12807d1ae4a288e14c761b9379fdf3c0e2b51644c195"}
Jan 28 18:04:53 crc kubenswrapper[4877]: I0128 18:04:53.045502 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-msptv"
Jan 28 18:04:53 crc kubenswrapper[4877]: I0128 18:04:53.050522 4877 scope.go:117] "RemoveContainer" containerID="a00223510ff917981513137208e2d8c83bc1cdacee9a730a724ff2185d58f5df"
Jan 28 18:04:53 crc kubenswrapper[4877]: I0128 18:04:53.116396 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-msptv"]
Jan 28 18:04:53 crc kubenswrapper[4877]: I0128 18:04:53.126919 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-msptv"]
Jan 28 18:04:53 crc kubenswrapper[4877]: I0128 18:04:53.315770 4877 scope.go:117] "RemoveContainer" containerID="6c643b54bc518d99796d626c7b93289eb691e36cfe57cd13691bf9ac08cebf8e"
Jan 28 18:04:53 crc kubenswrapper[4877]: I0128 18:04:53.352760 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" path="/var/lib/kubelet/pods/88ff251b-c893-4ea0-baa4-7d69abc30ac1/volumes"
Jan 28 18:04:53 crc kubenswrapper[4877]: I0128 18:04:53.779534 4877 scope.go:117] "RemoveContainer" containerID="537b0410574daa840ae1ba0f6ab3215df12c7ab196dfba12e5cc2893447f2ce8"
Jan 28 18:04:57 crc kubenswrapper[4877]: I0128 18:04:57.109954 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-vs7bc" podUID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:04:57 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:04:57 crc kubenswrapper[4877]: >
Jan 28 18:05:02 crc kubenswrapper[4877]: I0128 18:05:02.947514 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:05:02 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:05:02 crc kubenswrapper[4877]: >
Jan 28 18:05:04 crc kubenswrapper[4877]: I0128 18:05:04.600333 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-qn628" podUID="e25ef10a-92ae-45b2-9467-7f15b523a8a1" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:05:04 crc kubenswrapper[4877]: I0128 18:05:04.756505 4877 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c" containerName="galera" probeResult="failure" output="command timed out"
Jan 28 18:05:04 crc kubenswrapper[4877]: I0128 18:05:04.756833 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="5e8b9412-b4c3-411a-9b83-fe67dfe2cc1c" containerName="galera" probeResult="failure" output="command timed out"
Jan 28 18:05:05 crc kubenswrapper[4877]: I0128 18:05:05.489053 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vs7bc"
Jan 28 18:05:05 crc kubenswrapper[4877]: I0128 18:05:05.560867 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vs7bc"
Jan 28 18:05:05 crc kubenswrapper[4877]: I0128 18:05:05.737750 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vs7bc"]
Jan 28 18:05:07 crc kubenswrapper[4877]: I0128 18:05:07.221728 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vs7bc" podUID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" containerName="registry-server" containerID="cri-o://3e7f3f24d2bbe3dde061c0b2653167afd0c4834cbfb1d5de426c37b417dc0c78" gracePeriod=2
Jan 28 18:05:08 crc kubenswrapper[4877]: I0128 18:05:08.235658 4877 generic.go:334] "Generic (PLEG): container finished" podID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" containerID="3e7f3f24d2bbe3dde061c0b2653167afd0c4834cbfb1d5de426c37b417dc0c78" exitCode=0
Jan 28 18:05:08 crc kubenswrapper[4877]: I0128 18:05:08.235773 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vs7bc" event={"ID":"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9","Type":"ContainerDied","Data":"3e7f3f24d2bbe3dde061c0b2653167afd0c4834cbfb1d5de426c37b417dc0c78"}
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.007803 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vs7bc"
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.121513 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xh2mm\" (UniqueName: \"kubernetes.io/projected/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-kube-api-access-xh2mm\") pod \"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9\" (UID: \"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9\") "
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.121572 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-catalog-content\") pod \"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9\" (UID: \"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9\") "
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.121832 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-utilities\") pod \"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9\" (UID: \"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9\") "
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.122793 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-utilities" (OuterVolumeSpecName: "utilities") pod "bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" (UID: "bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.123315 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.128529 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-kube-api-access-xh2mm" (OuterVolumeSpecName: "kube-api-access-xh2mm") pod "bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" (UID: "bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9"). InnerVolumeSpecName "kube-api-access-xh2mm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.225189 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xh2mm\" (UniqueName: \"kubernetes.io/projected/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-kube-api-access-xh2mm\") on node \"crc\" DevicePath \"\""
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.225810 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" (UID: "bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.250119 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vs7bc" event={"ID":"bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9","Type":"ContainerDied","Data":"4e0cf531afc333b0ec63694a8aa0b99002cddac6bb4f88ba41a7adab1561e233"}
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.250178 4877 scope.go:117] "RemoveContainer" containerID="3e7f3f24d2bbe3dde061c0b2653167afd0c4834cbfb1d5de426c37b417dc0c78"
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.250332 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vs7bc"
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.293285 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vs7bc"]
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.297177 4877 scope.go:117] "RemoveContainer" containerID="843742a09a04c38955284b002724373ecec2529696eb42253bf00f81d1a325ef"
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.316338 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vs7bc"]
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.328055 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.349997 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" path="/var/lib/kubelet/pods/bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9/volumes"
Jan 28 18:05:09 crc kubenswrapper[4877]: I0128 18:05:09.369564 4877 scope.go:117] "RemoveContainer" containerID="d4bbd41832676d25b154042830ac3fbe76b430f639bbc400f58031926108755c"
Jan 28 18:05:12 crc kubenswrapper[4877]: I0128 18:05:12.954510 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:05:12 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:05:12 crc kubenswrapper[4877]: >
Jan 28 18:05:22 crc kubenswrapper[4877]: I0128 18:05:22.956673 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:05:22 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:05:22 crc kubenswrapper[4877]: >
Jan 28 18:05:33 crc kubenswrapper[4877]: I0128 18:05:33.116833 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:05:33 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:05:33 crc kubenswrapper[4877]: >
Jan 28 18:05:33 crc kubenswrapper[4877]: I0128 18:05:33.117391 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 18:05:33 crc kubenswrapper[4877]: I0128 18:05:33.119028 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"c1ddd1231e4a1ac3cb2a2cf11594cb774e305c83273aa52b1b86e9fe34f15366"} pod="openshift-marketplace/redhat-operators-9lmsk" containerMessage="Container registry-server failed startup probe, will be restarted"
Jan 28 18:05:33 crc kubenswrapper[4877]: I0128 18:05:33.119197 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" containerID="cri-o://c1ddd1231e4a1ac3cb2a2cf11594cb774e305c83273aa52b1b86e9fe34f15366" gracePeriod=30
Jan 28 18:06:03 crc kubenswrapper[4877]: I0128 18:06:03.953568 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9lmsk_62733c9a-12dc-4bfc-88c7-06b6cec26fc3/registry-server/2.log"
Jan 28 18:06:03 crc kubenswrapper[4877]: I0128 18:06:03.962212 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9lmsk_62733c9a-12dc-4bfc-88c7-06b6cec26fc3/registry-server/1.log"
Jan 28 18:06:03 crc kubenswrapper[4877]: I0128 18:06:03.965257 4877 generic.go:334] "Generic (PLEG): container finished" podID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerID="c1ddd1231e4a1ac3cb2a2cf11594cb774e305c83273aa52b1b86e9fe34f15366" exitCode=137
Jan 28 18:06:03 crc kubenswrapper[4877]: I0128 18:06:03.965307 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lmsk" event={"ID":"62733c9a-12dc-4bfc-88c7-06b6cec26fc3","Type":"ContainerDied","Data":"c1ddd1231e4a1ac3cb2a2cf11594cb774e305c83273aa52b1b86e9fe34f15366"}
Jan 28 18:06:03 crc kubenswrapper[4877]: I0128 18:06:03.965343 4877 scope.go:117] "RemoveContainer" containerID="1d6b80ce5f7fb85778eeff2a24ed478861089cbb66c8347538a0400dc8d2a031"
Jan 28 18:06:04 crc kubenswrapper[4877]: I0128 18:06:04.978580 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9lmsk_62733c9a-12dc-4bfc-88c7-06b6cec26fc3/registry-server/2.log"
Jan 28 18:06:05 crc kubenswrapper[4877]: I0128 18:06:05.995001 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9lmsk_62733c9a-12dc-4bfc-88c7-06b6cec26fc3/registry-server/2.log"
Jan 28 18:06:06 crc kubenswrapper[4877]: I0128 18:06:05.996070 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lmsk" event={"ID":"62733c9a-12dc-4bfc-88c7-06b6cec26fc3","Type":"ContainerStarted","Data":"1d936a76294113c4eecb55fbe0651a6d7261d4896cfabd44a37987091720065d"}
Jan 28 18:06:07 crc kubenswrapper[4877]: I0128 18:06:07.075863 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 18:06:07 crc kubenswrapper[4877]: I0128 18:06:07.076200 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 18:06:11 crc kubenswrapper[4877]: I0128 18:06:11.900217 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 18:06:11 crc kubenswrapper[4877]: I0128 18:06:11.900969 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 18:06:12 crc kubenswrapper[4877]: I0128 18:06:12.963940 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:06:12 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:06:12 crc kubenswrapper[4877]: >
Jan 28 18:06:18 crc kubenswrapper[4877]: I0128 18:06:18.128065 4877 generic.go:334] "Generic (PLEG): container finished" podID="7829fe04-318e-4cda-adb5-4109e6d6f751" containerID="37722aa48435b88a0829b19a19182148c36d2f40151c17a01240122654767edf" exitCode=0
Jan 28 18:06:18 crc kubenswrapper[4877]: I0128 18:06:18.128114 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" event={"ID":"7829fe04-318e-4cda-adb5-4109e6d6f751","Type":"ContainerDied","Data":"37722aa48435b88a0829b19a19182148c36d2f40151c17a01240122654767edf"}
Jan 28 18:06:18 crc kubenswrapper[4877]: I0128 18:06:18.128565 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l" event={"ID":"7829fe04-318e-4cda-adb5-4109e6d6f751","Type":"ContainerStarted","Data":"aefcdedeb2fb270adcea7565c9e9bcd73e01a160d65898a43277f0f49b6f9e7a"}
Jan 28 18:06:22 crc kubenswrapper[4877]: I0128 18:06:22.950038 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:06:22 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:06:22 crc kubenswrapper[4877]: >
Jan 28 18:06:31 crc kubenswrapper[4877]: I0128 18:06:31.951723 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 18:06:32 crc kubenswrapper[4877]: I0128 18:06:32.006328 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 18:06:32 crc kubenswrapper[4877]: I0128 18:06:32.994042 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9lmsk"]
Jan 28 18:06:33 crc kubenswrapper[4877]: I0128 18:06:33.298948 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9lmsk" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" containerID="cri-o://1d936a76294113c4eecb55fbe0651a6d7261d4896cfabd44a37987091720065d" gracePeriod=2
Jan 28 18:06:34 crc kubenswrapper[4877]: I0128 18:06:34.313632 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9lmsk_62733c9a-12dc-4bfc-88c7-06b6cec26fc3/registry-server/2.log"
Jan 28 18:06:34 crc kubenswrapper[4877]: I0128 18:06:34.314803 4877 generic.go:334] "Generic (PLEG): container finished" podID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerID="1d936a76294113c4eecb55fbe0651a6d7261d4896cfabd44a37987091720065d" exitCode=0
Jan 28 18:06:34 crc kubenswrapper[4877]: I0128 18:06:34.314842 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lmsk" event={"ID":"62733c9a-12dc-4bfc-88c7-06b6cec26fc3","Type":"ContainerDied","Data":"1d936a76294113c4eecb55fbe0651a6d7261d4896cfabd44a37987091720065d"}
Jan 28 18:06:34 crc kubenswrapper[4877]: I0128 18:06:34.314880 4877 scope.go:117] "RemoveContainer" containerID="c1ddd1231e4a1ac3cb2a2cf11594cb774e305c83273aa52b1b86e9fe34f15366"
Jan 28 18:06:34 crc kubenswrapper[4877]: I0128 18:06:34.580667 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 18:06:34 crc kubenswrapper[4877]: I0128 18:06:34.683408 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-catalog-content\") pod \"62733c9a-12dc-4bfc-88c7-06b6cec26fc3\" (UID: \"62733c9a-12dc-4bfc-88c7-06b6cec26fc3\") "
Jan 28 18:06:34 crc kubenswrapper[4877]: I0128 18:06:34.683535 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-utilities\") pod \"62733c9a-12dc-4bfc-88c7-06b6cec26fc3\" (UID: \"62733c9a-12dc-4bfc-88c7-06b6cec26fc3\") "
Jan 28 18:06:34 crc kubenswrapper[4877]: I0128 18:06:34.683606 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lg8b4\" (UniqueName: \"kubernetes.io/projected/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-kube-api-access-lg8b4\") pod \"62733c9a-12dc-4bfc-88c7-06b6cec26fc3\" (UID: \"62733c9a-12dc-4bfc-88c7-06b6cec26fc3\") "
Jan 28 18:06:34 crc kubenswrapper[4877]: I0128 18:06:34.690925 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-utilities" (OuterVolumeSpecName: "utilities") pod "62733c9a-12dc-4bfc-88c7-06b6cec26fc3" (UID: "62733c9a-12dc-4bfc-88c7-06b6cec26fc3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:06:34 crc kubenswrapper[4877]: I0128 18:06:34.712724 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-kube-api-access-lg8b4" (OuterVolumeSpecName: "kube-api-access-lg8b4") pod "62733c9a-12dc-4bfc-88c7-06b6cec26fc3" (UID: "62733c9a-12dc-4bfc-88c7-06b6cec26fc3"). InnerVolumeSpecName "kube-api-access-lg8b4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:06:34 crc kubenswrapper[4877]: I0128 18:06:34.786591 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 18:06:34 crc kubenswrapper[4877]: I0128 18:06:34.786913 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lg8b4\" (UniqueName: \"kubernetes.io/projected/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-kube-api-access-lg8b4\") on node \"crc\" DevicePath \"\""
Jan 28 18:06:34 crc kubenswrapper[4877]: I0128 18:06:34.905181 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "62733c9a-12dc-4bfc-88c7-06b6cec26fc3" (UID: "62733c9a-12dc-4bfc-88c7-06b6cec26fc3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:06:34 crc kubenswrapper[4877]: I0128 18:06:34.991780 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62733c9a-12dc-4bfc-88c7-06b6cec26fc3-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 18:06:36 crc kubenswrapper[4877]: I0128 18:06:35.332244 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9lmsk"
Jan 28 18:06:36 crc kubenswrapper[4877]: E0128 18:06:36.400382 4877 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.071s"
Jan 28 18:06:36 crc kubenswrapper[4877]: I0128 18:06:36.400440 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l"
Jan 28 18:06:36 crc kubenswrapper[4877]: I0128 18:06:36.400592 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9lmsk" event={"ID":"62733c9a-12dc-4bfc-88c7-06b6cec26fc3","Type":"ContainerDied","Data":"4d63a2f783f7e614eedd682e19cf2d3110394455e88a89e9fd299074d9c2fb81"}
Jan 28 18:06:36 crc kubenswrapper[4877]: I0128 18:06:36.400633 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l"
Jan 28 18:06:36 crc kubenswrapper[4877]: I0128 18:06:36.400657 4877 scope.go:117] "RemoveContainer" containerID="1d936a76294113c4eecb55fbe0651a6d7261d4896cfabd44a37987091720065d"
Jan 28 18:06:36 crc kubenswrapper[4877]: I0128 18:06:36.444865 4877 scope.go:117] "RemoveContainer" containerID="86b58765adba59cec0c76c3b343cf4dec07956b04467203cedbce9c55e07a440"
Jan 28 18:06:36 crc kubenswrapper[4877]: I0128 18:06:36.475236 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9lmsk"]
Jan 28 18:06:36 crc kubenswrapper[4877]: I0128 18:06:36.486112 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9lmsk"]
Jan 28 18:06:36 crc kubenswrapper[4877]: I0128 18:06:36.490147 4877 scope.go:117] "RemoveContainer" containerID="7f91075cf2fd2d1c9bfd42c0a78fce631b4943a2aec863ff73da1297a04abd3d"
Jan 28 18:06:37 crc kubenswrapper[4877]: I0128 18:06:37.076730 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 18:06:37 crc kubenswrapper[4877]: I0128 18:06:37.076794 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 18:06:37 crc kubenswrapper[4877]: I0128 18:06:37.344467 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" path="/var/lib/kubelet/pods/62733c9a-12dc-4bfc-88c7-06b6cec26fc3/volumes"
Jan 28 18:06:56 crc kubenswrapper[4877]: I0128 18:06:56.079989 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l"
Jan 28 18:06:56 crc kubenswrapper[4877]: I0128 18:06:56.084087 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/metrics-server-fbbd74554-qkt8l"
Jan 28 18:07:07 crc kubenswrapper[4877]: I0128 18:07:07.077002 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 18:07:07 crc kubenswrapper[4877]: I0128 18:07:07.077722 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 18:07:07 crc kubenswrapper[4877]: I0128 18:07:07.077785 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm"
Jan 28 18:07:07 crc kubenswrapper[4877]: I0128 18:07:07.078955 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3b1158d685496e843371473bdeec73e64df842a9d2ab4be2675e257520e33a18"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 18:07:07 crc kubenswrapper[4877]: I0128 18:07:07.079023 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" containerID="cri-o://3b1158d685496e843371473bdeec73e64df842a9d2ab4be2675e257520e33a18" gracePeriod=600
Jan 28 18:07:07 crc kubenswrapper[4877]: I0128 18:07:07.792516 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="3b1158d685496e843371473bdeec73e64df842a9d2ab4be2675e257520e33a18" exitCode=0
Jan 28 18:07:07 crc kubenswrapper[4877]: I0128 18:07:07.792753 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"3b1158d685496e843371473bdeec73e64df842a9d2ab4be2675e257520e33a18"}
Jan 28 18:07:07 crc kubenswrapper[4877]: I0128 18:07:07.792915 4877 scope.go:117] "RemoveContainer" containerID="9db8a762a1fd41938ac35bcc3fe86d50930467886f2a9461a8ef5ae4d86a1a8a"
Jan 28 18:07:08 crc kubenswrapper[4877]: I0128 18:07:08.805364 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerStarted","Data":"b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b"}
Jan 28 18:09:37 crc kubenswrapper[4877]: I0128 18:09:37.076753 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 18:09:37 crc kubenswrapper[4877]: I0128 18:09:37.077333 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 18:09:58 crc kubenswrapper[4877]: I0128 18:09:58.785878 4877 generic.go:334] "Generic (PLEG): container finished" podID="979f7960-d78e-4f7d-b68b-757e70ac5378" containerID="d92475227e59e8acf98f8bbb95d2a57b0ce2be563d3c609e4d476d456393fdc5" exitCode=1
Jan 28 18:09:58 crc kubenswrapper[4877]: I0128 18:09:58.785987 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"979f7960-d78e-4f7d-b68b-757e70ac5378","Type":"ContainerDied","Data":"d92475227e59e8acf98f8bbb95d2a57b0ce2be563d3c609e4d476d456393fdc5"}
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.788137 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.812161 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"979f7960-d78e-4f7d-b68b-757e70ac5378","Type":"ContainerDied","Data":"2c935c665f00dd76f04f203bd82cc568e3029ff8016a7d609d2339581936bbee"}
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.812620 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c935c665f00dd76f04f203bd82cc568e3029ff8016a7d609d2339581936bbee"
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.812698 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.926084 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/979f7960-d78e-4f7d-b68b-757e70ac5378-test-operator-ephemeral-workdir\") pod \"979f7960-d78e-4f7d-b68b-757e70ac5378\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") "
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.926176 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-ca-certs\") pod \"979f7960-d78e-4f7d-b68b-757e70ac5378\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") "
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.926273 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/979f7960-d78e-4f7d-b68b-757e70ac5378-test-operator-ephemeral-temporary\") pod \"979f7960-d78e-4f7d-b68b-757e70ac5378\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") "
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.926349 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/979f7960-d78e-4f7d-b68b-757e70ac5378-config-data\") pod \"979f7960-d78e-4f7d-b68b-757e70ac5378\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") "
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.926377 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"979f7960-d78e-4f7d-b68b-757e70ac5378\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") "
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.926458 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/979f7960-d78e-4f7d-b68b-757e70ac5378-openstack-config\") pod \"979f7960-d78e-4f7d-b68b-757e70ac5378\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") "
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.926775 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-ssh-key\") pod \"979f7960-d78e-4f7d-b68b-757e70ac5378\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") "
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.926800 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-openstack-config-secret\") pod \"979f7960-d78e-4f7d-b68b-757e70ac5378\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") "
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.926820 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6vxf\" (UniqueName: \"kubernetes.io/projected/979f7960-d78e-4f7d-b68b-757e70ac5378-kube-api-access-v6vxf\") pod \"979f7960-d78e-4f7d-b68b-757e70ac5378\" (UID: \"979f7960-d78e-4f7d-b68b-757e70ac5378\") "
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.927828 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/979f7960-d78e-4f7d-b68b-757e70ac5378-config-data" (OuterVolumeSpecName: "config-data") pod "979f7960-d78e-4f7d-b68b-757e70ac5378" (UID: "979f7960-d78e-4f7d-b68b-757e70ac5378"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.928228 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/979f7960-d78e-4f7d-b68b-757e70ac5378-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "979f7960-d78e-4f7d-b68b-757e70ac5378" (UID: "979f7960-d78e-4f7d-b68b-757e70ac5378"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.930497 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/979f7960-d78e-4f7d-b68b-757e70ac5378-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "979f7960-d78e-4f7d-b68b-757e70ac5378" (UID: "979f7960-d78e-4f7d-b68b-757e70ac5378"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.933802 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "test-operator-logs") pod "979f7960-d78e-4f7d-b68b-757e70ac5378" (UID: "979f7960-d78e-4f7d-b68b-757e70ac5378"). InnerVolumeSpecName "local-storage12-crc".
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.934523 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/979f7960-d78e-4f7d-b68b-757e70ac5378-kube-api-access-v6vxf" (OuterVolumeSpecName: "kube-api-access-v6vxf") pod "979f7960-d78e-4f7d-b68b-757e70ac5378" (UID: "979f7960-d78e-4f7d-b68b-757e70ac5378"). InnerVolumeSpecName "kube-api-access-v6vxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.972498 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "979f7960-d78e-4f7d-b68b-757e70ac5378" (UID: "979f7960-d78e-4f7d-b68b-757e70ac5378"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.972996 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "979f7960-d78e-4f7d-b68b-757e70ac5378" (UID: "979f7960-d78e-4f7d-b68b-757e70ac5378"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.977081 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "979f7960-d78e-4f7d-b68b-757e70ac5378" (UID: "979f7960-d78e-4f7d-b68b-757e70ac5378"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:10:00 crc kubenswrapper[4877]: I0128 18:10:00.997196 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/979f7960-d78e-4f7d-b68b-757e70ac5378-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "979f7960-d78e-4f7d-b68b-757e70ac5378" (UID: "979f7960-d78e-4f7d-b68b-757e70ac5378"). InnerVolumeSpecName "openstack-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:10:01 crc kubenswrapper[4877]: I0128 18:10:01.029953 4877 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/979f7960-d78e-4f7d-b68b-757e70ac5378-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:10:01 crc kubenswrapper[4877]: I0128 18:10:01.029991 4877 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-ssh-key\") on node \"crc\" DevicePath \"\"" Jan 28 18:10:01 crc kubenswrapper[4877]: I0128 18:10:01.030004 4877 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 28 18:10:01 crc kubenswrapper[4877]: I0128 18:10:01.030016 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6vxf\" (UniqueName: \"kubernetes.io/projected/979f7960-d78e-4f7d-b68b-757e70ac5378-kube-api-access-v6vxf\") on node \"crc\" DevicePath \"\"" Jan 28 18:10:01 crc kubenswrapper[4877]: I0128 18:10:01.030027 4877 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/979f7960-d78e-4f7d-b68b-757e70ac5378-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Jan 28 18:10:01 crc kubenswrapper[4877]: I0128 18:10:01.030039 4877 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/979f7960-d78e-4f7d-b68b-757e70ac5378-ca-certs\") on node \"crc\" DevicePath \"\"" Jan 28 18:10:01 crc kubenswrapper[4877]: I0128 18:10:01.030049 4877 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/979f7960-d78e-4f7d-b68b-757e70ac5378-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Jan 28 18:10:01 crc kubenswrapper[4877]: I0128 18:10:01.030060 4877 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/979f7960-d78e-4f7d-b68b-757e70ac5378-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:10:01 crc kubenswrapper[4877]: I0128 18:10:01.030640 4877 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Jan 28 18:10:01 crc kubenswrapper[4877]: I0128 18:10:01.063187 4877 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Jan 28 18:10:01 crc kubenswrapper[4877]: I0128 18:10:01.137714 4877 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Jan 28 18:10:07 crc kubenswrapper[4877]: I0128 18:10:07.076338 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:10:07 crc kubenswrapper[4877]: I0128 18:10:07.076840 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" 
podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.927174 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 28 18:10:11 crc kubenswrapper[4877]: E0128 18:10:11.930110 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.930446 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" Jan 28 18:10:11 crc kubenswrapper[4877]: E0128 18:10:11.930597 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" containerName="extract-utilities" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.930614 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" containerName="extract-utilities" Jan 28 18:10:11 crc kubenswrapper[4877]: E0128 18:10:11.930635 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="extract-utilities" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.930644 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="extract-utilities" Jan 28 18:10:11 crc kubenswrapper[4877]: E0128 18:10:11.930668 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.930678 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" Jan 28 18:10:11 crc kubenswrapper[4877]: E0128 18:10:11.930701 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerName="extract-utilities" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.930713 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerName="extract-utilities" Jan 28 18:10:11 crc kubenswrapper[4877]: E0128 18:10:11.930732 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.930740 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" Jan 28 18:10:11 crc kubenswrapper[4877]: E0128 18:10:11.930752 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerName="extract-content" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.930760 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerName="extract-content" Jan 28 18:10:11 crc kubenswrapper[4877]: E0128 18:10:11.930775 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" containerName="registry-server" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.930783 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" 
containerName="registry-server" Jan 28 18:10:11 crc kubenswrapper[4877]: E0128 18:10:11.930792 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" containerName="extract-content" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.930800 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" containerName="extract-content" Jan 28 18:10:11 crc kubenswrapper[4877]: E0128 18:10:11.930836 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="979f7960-d78e-4f7d-b68b-757e70ac5378" containerName="tempest-tests-tempest-tests-runner" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.930845 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="979f7960-d78e-4f7d-b68b-757e70ac5378" containerName="tempest-tests-tempest-tests-runner" Jan 28 18:10:11 crc kubenswrapper[4877]: E0128 18:10:11.930868 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerName="registry-server" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.930876 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerName="registry-server" Jan 28 18:10:11 crc kubenswrapper[4877]: E0128 18:10:11.930893 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="extract-content" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.930900 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="extract-content" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.932113 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="979f7960-d78e-4f7d-b68b-757e70ac5378" containerName="tempest-tests-tempest-tests-runner" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.932166 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf9a10ec-fadb-42f5-80ea-7e5caa7a4dc9" containerName="registry-server" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.932187 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.932199 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.932210 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="88ff251b-c893-4ea0-baa4-7d69abc30ac1" containerName="registry-server" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.932226 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.941614 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.958671 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-m5q9r" Jan 28 18:10:11 crc kubenswrapper[4877]: I0128 18:10:11.990180 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 28 18:10:12 crc kubenswrapper[4877]: I0128 18:10:12.136162 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"1acf98ba-f187-4d69-8da3-2df21dc120e1\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 18:10:12 crc kubenswrapper[4877]: I0128 18:10:12.136210 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qfrp\" (UniqueName: \"kubernetes.io/projected/1acf98ba-f187-4d69-8da3-2df21dc120e1-kube-api-access-4qfrp\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"1acf98ba-f187-4d69-8da3-2df21dc120e1\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 18:10:12 crc kubenswrapper[4877]: I0128 18:10:12.238862 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"1acf98ba-f187-4d69-8da3-2df21dc120e1\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 18:10:12 crc kubenswrapper[4877]: I0128 18:10:12.239186 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qfrp\" (UniqueName: \"kubernetes.io/projected/1acf98ba-f187-4d69-8da3-2df21dc120e1-kube-api-access-4qfrp\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"1acf98ba-f187-4d69-8da3-2df21dc120e1\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 18:10:12 crc kubenswrapper[4877]: I0128 18:10:12.247224 4877 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"1acf98ba-f187-4d69-8da3-2df21dc120e1\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 18:10:12 crc kubenswrapper[4877]: I0128 18:10:12.343959 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qfrp\" (UniqueName: \"kubernetes.io/projected/1acf98ba-f187-4d69-8da3-2df21dc120e1-kube-api-access-4qfrp\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"1acf98ba-f187-4d69-8da3-2df21dc120e1\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 18:10:12 crc kubenswrapper[4877]: I0128 18:10:12.385717 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"1acf98ba-f187-4d69-8da3-2df21dc120e1\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 18:10:12 crc 
kubenswrapper[4877]: I0128 18:10:12.575942 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 28 18:10:13 crc kubenswrapper[4877]: I0128 18:10:13.316632 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 28 18:10:13 crc kubenswrapper[4877]: I0128 18:10:13.384851 4877 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 18:10:13 crc kubenswrapper[4877]: I0128 18:10:13.988896 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"1acf98ba-f187-4d69-8da3-2df21dc120e1","Type":"ContainerStarted","Data":"2f163abca0f641ed6ec6f57c14713d62b2f14c4d9055387da006ef048a20b9bb"} Jan 28 18:10:15 crc kubenswrapper[4877]: I0128 18:10:15.001463 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"1acf98ba-f187-4d69-8da3-2df21dc120e1","Type":"ContainerStarted","Data":"8f063cb711e8a9a3e9177e207383397b3aeae00fc33321a80fbb041c21f02810"} Jan 28 18:10:15 crc kubenswrapper[4877]: I0128 18:10:15.024700 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.683649554 podStartE2EDuration="4.024673751s" podCreationTimestamp="2026-01-28 18:10:11 +0000 UTC" firstStartedPulling="2026-01-28 18:10:13.383709465 +0000 UTC m=+5716.942036353" lastFinishedPulling="2026-01-28 18:10:14.724733662 +0000 UTC m=+5718.283060550" observedRunningTime="2026-01-28 18:10:15.013623213 +0000 UTC m=+5718.571950101" watchObservedRunningTime="2026-01-28 18:10:15.024673751 +0000 UTC m=+5718.583000639" Jan 28 18:10:37 crc kubenswrapper[4877]: I0128 18:10:37.076559 4877 patch_prober.go:28] interesting pod/machine-config-daemon-6xsrm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:10:37 crc kubenswrapper[4877]: I0128 18:10:37.077176 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:10:37 crc kubenswrapper[4877]: I0128 18:10:37.077235 4877 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" Jan 28 18:10:37 crc kubenswrapper[4877]: I0128 18:10:37.078360 4877 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b"} pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 18:10:37 crc kubenswrapper[4877]: I0128 18:10:37.078431 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerName="machine-config-daemon" 
containerID="cri-o://b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" gracePeriod=600 Jan 28 18:10:37 crc kubenswrapper[4877]: E0128 18:10:37.217083 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:10:37 crc kubenswrapper[4877]: I0128 18:10:37.262648 4877 generic.go:334] "Generic (PLEG): container finished" podID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" exitCode=0 Jan 28 18:10:37 crc kubenswrapper[4877]: I0128 18:10:37.262737 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" event={"ID":"95a2e787-3c51-42f8-b6fc-46b7c39ed39d","Type":"ContainerDied","Data":"b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b"} Jan 28 18:10:37 crc kubenswrapper[4877]: I0128 18:10:37.262802 4877 scope.go:117] "RemoveContainer" containerID="3b1158d685496e843371473bdeec73e64df842a9d2ab4be2675e257520e33a18" Jan 28 18:10:37 crc kubenswrapper[4877]: I0128 18:10:37.264348 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:10:37 crc kubenswrapper[4877]: E0128 18:10:37.272970 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:10:49 crc kubenswrapper[4877]: I0128 18:10:49.332998 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:10:49 crc kubenswrapper[4877]: E0128 18:10:49.334363 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:11:02 crc kubenswrapper[4877]: I0128 18:11:02.945802 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-l4fcp"] Jan 28 18:11:02 crc kubenswrapper[4877]: E0128 18:11:02.946919 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" Jan 28 18:11:02 crc kubenswrapper[4877]: I0128 18:11:02.946940 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" Jan 28 18:11:02 crc kubenswrapper[4877]: I0128 18:11:02.947240 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="62733c9a-12dc-4bfc-88c7-06b6cec26fc3" containerName="registry-server" Jan 28 18:11:02 crc kubenswrapper[4877]: I0128 18:11:02.949176 4877 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:03 crc kubenswrapper[4877]: I0128 18:11:03.017418 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l4fcp"] Jan 28 18:11:03 crc kubenswrapper[4877]: I0128 18:11:03.127758 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a51a0f1-93a6-4def-afc0-d5470c6d7358-catalog-content\") pod \"community-operators-l4fcp\" (UID: \"0a51a0f1-93a6-4def-afc0-d5470c6d7358\") " pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:03 crc kubenswrapper[4877]: I0128 18:11:03.127862 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fm2r\" (UniqueName: \"kubernetes.io/projected/0a51a0f1-93a6-4def-afc0-d5470c6d7358-kube-api-access-2fm2r\") pod \"community-operators-l4fcp\" (UID: \"0a51a0f1-93a6-4def-afc0-d5470c6d7358\") " pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:03 crc kubenswrapper[4877]: I0128 18:11:03.127883 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a51a0f1-93a6-4def-afc0-d5470c6d7358-utilities\") pod \"community-operators-l4fcp\" (UID: \"0a51a0f1-93a6-4def-afc0-d5470c6d7358\") " pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:03 crc kubenswrapper[4877]: I0128 18:11:03.230102 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a51a0f1-93a6-4def-afc0-d5470c6d7358-catalog-content\") pod \"community-operators-l4fcp\" (UID: \"0a51a0f1-93a6-4def-afc0-d5470c6d7358\") " pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:03 crc kubenswrapper[4877]: I0128 18:11:03.230441 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fm2r\" (UniqueName: \"kubernetes.io/projected/0a51a0f1-93a6-4def-afc0-d5470c6d7358-kube-api-access-2fm2r\") pod \"community-operators-l4fcp\" (UID: \"0a51a0f1-93a6-4def-afc0-d5470c6d7358\") " pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:03 crc kubenswrapper[4877]: I0128 18:11:03.230460 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a51a0f1-93a6-4def-afc0-d5470c6d7358-utilities\") pod \"community-operators-l4fcp\" (UID: \"0a51a0f1-93a6-4def-afc0-d5470c6d7358\") " pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:03 crc kubenswrapper[4877]: I0128 18:11:03.230647 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a51a0f1-93a6-4def-afc0-d5470c6d7358-catalog-content\") pod \"community-operators-l4fcp\" (UID: \"0a51a0f1-93a6-4def-afc0-d5470c6d7358\") " pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:03 crc kubenswrapper[4877]: I0128 18:11:03.232331 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a51a0f1-93a6-4def-afc0-d5470c6d7358-utilities\") pod \"community-operators-l4fcp\" (UID: \"0a51a0f1-93a6-4def-afc0-d5470c6d7358\") " pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:03 crc kubenswrapper[4877]: I0128 
18:11:03.250211 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fm2r\" (UniqueName: \"kubernetes.io/projected/0a51a0f1-93a6-4def-afc0-d5470c6d7358-kube-api-access-2fm2r\") pod \"community-operators-l4fcp\" (UID: \"0a51a0f1-93a6-4def-afc0-d5470c6d7358\") " pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:03 crc kubenswrapper[4877]: I0128 18:11:03.276693 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:03 crc kubenswrapper[4877]: I0128 18:11:03.332359 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:11:03 crc kubenswrapper[4877]: E0128 18:11:03.332735 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:11:03 crc kubenswrapper[4877]: I0128 18:11:03.959229 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l4fcp"] Jan 28 18:11:04 crc kubenswrapper[4877]: I0128 18:11:04.637980 4877 generic.go:334] "Generic (PLEG): container finished" podID="0a51a0f1-93a6-4def-afc0-d5470c6d7358" containerID="3c56f17d827225cd430bad48135e100e5f133eae4281d78353f9c27633830bae" exitCode=0 Jan 28 18:11:04 crc kubenswrapper[4877]: I0128 18:11:04.638025 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4fcp" event={"ID":"0a51a0f1-93a6-4def-afc0-d5470c6d7358","Type":"ContainerDied","Data":"3c56f17d827225cd430bad48135e100e5f133eae4281d78353f9c27633830bae"} Jan 28 18:11:04 crc kubenswrapper[4877]: I0128 18:11:04.638548 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4fcp" event={"ID":"0a51a0f1-93a6-4def-afc0-d5470c6d7358","Type":"ContainerStarted","Data":"bc5ea6bbdeaa7ce2a4d97f8a4b0edf58a85be752d6631db21e2838b92104d411"} Jan 28 18:11:05 crc kubenswrapper[4877]: I0128 18:11:05.843903 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-8qrtc/must-gather-hzsz5"] Jan 28 18:11:05 crc kubenswrapper[4877]: I0128 18:11:05.846326 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8qrtc/must-gather-hzsz5" Jan 28 18:11:05 crc kubenswrapper[4877]: I0128 18:11:05.850514 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-8qrtc"/"default-dockercfg-4gpsh" Jan 28 18:11:05 crc kubenswrapper[4877]: I0128 18:11:05.860684 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-8qrtc"/"openshift-service-ca.crt" Jan 28 18:11:05 crc kubenswrapper[4877]: I0128 18:11:05.860974 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-8qrtc"/"kube-root-ca.crt" Jan 28 18:11:05 crc kubenswrapper[4877]: I0128 18:11:05.873702 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-8qrtc/must-gather-hzsz5"] Jan 28 18:11:06 crc kubenswrapper[4877]: I0128 18:11:06.010197 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m954w\" (UniqueName: \"kubernetes.io/projected/b4aebf16-08cb-45fe-8d25-0210aade1749-kube-api-access-m954w\") pod \"must-gather-hzsz5\" (UID: \"b4aebf16-08cb-45fe-8d25-0210aade1749\") " pod="openshift-must-gather-8qrtc/must-gather-hzsz5" Jan 28 18:11:06 crc kubenswrapper[4877]: I0128 18:11:06.010269 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b4aebf16-08cb-45fe-8d25-0210aade1749-must-gather-output\") pod \"must-gather-hzsz5\" (UID: \"b4aebf16-08cb-45fe-8d25-0210aade1749\") " pod="openshift-must-gather-8qrtc/must-gather-hzsz5" Jan 28 18:11:06 crc kubenswrapper[4877]: I0128 18:11:06.113165 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m954w\" (UniqueName: \"kubernetes.io/projected/b4aebf16-08cb-45fe-8d25-0210aade1749-kube-api-access-m954w\") pod \"must-gather-hzsz5\" (UID: \"b4aebf16-08cb-45fe-8d25-0210aade1749\") " pod="openshift-must-gather-8qrtc/must-gather-hzsz5" Jan 28 18:11:06 crc kubenswrapper[4877]: I0128 18:11:06.113223 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b4aebf16-08cb-45fe-8d25-0210aade1749-must-gather-output\") pod \"must-gather-hzsz5\" (UID: \"b4aebf16-08cb-45fe-8d25-0210aade1749\") " pod="openshift-must-gather-8qrtc/must-gather-hzsz5" Jan 28 18:11:06 crc kubenswrapper[4877]: I0128 18:11:06.113815 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b4aebf16-08cb-45fe-8d25-0210aade1749-must-gather-output\") pod \"must-gather-hzsz5\" (UID: \"b4aebf16-08cb-45fe-8d25-0210aade1749\") " pod="openshift-must-gather-8qrtc/must-gather-hzsz5" Jan 28 18:11:06 crc kubenswrapper[4877]: I0128 18:11:06.145329 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m954w\" (UniqueName: \"kubernetes.io/projected/b4aebf16-08cb-45fe-8d25-0210aade1749-kube-api-access-m954w\") pod \"must-gather-hzsz5\" (UID: \"b4aebf16-08cb-45fe-8d25-0210aade1749\") " pod="openshift-must-gather-8qrtc/must-gather-hzsz5" Jan 28 18:11:06 crc kubenswrapper[4877]: I0128 18:11:06.164501 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8qrtc/must-gather-hzsz5" Jan 28 18:11:07 crc kubenswrapper[4877]: I0128 18:11:07.134492 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-8qrtc/must-gather-hzsz5"] Jan 28 18:11:07 crc kubenswrapper[4877]: W0128 18:11:07.156726 4877 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4aebf16_08cb_45fe_8d25_0210aade1749.slice/crio-76a5b286408d8578e35fd9be74cdaa78224e60a4f0e2ec7b18a1f6bd32d7f6d7 WatchSource:0}: Error finding container 76a5b286408d8578e35fd9be74cdaa78224e60a4f0e2ec7b18a1f6bd32d7f6d7: Status 404 returned error can't find the container with id 76a5b286408d8578e35fd9be74cdaa78224e60a4f0e2ec7b18a1f6bd32d7f6d7 Jan 28 18:11:07 crc kubenswrapper[4877]: I0128 18:11:07.670517 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4fcp" event={"ID":"0a51a0f1-93a6-4def-afc0-d5470c6d7358","Type":"ContainerStarted","Data":"fb2e2e8a1257cc26322acde259aa5ac08a9ad2bc83f1ef77db42d47fee485eab"} Jan 28 18:11:07 crc kubenswrapper[4877]: I0128 18:11:07.674027 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8qrtc/must-gather-hzsz5" event={"ID":"b4aebf16-08cb-45fe-8d25-0210aade1749","Type":"ContainerStarted","Data":"76a5b286408d8578e35fd9be74cdaa78224e60a4f0e2ec7b18a1f6bd32d7f6d7"} Jan 28 18:11:16 crc kubenswrapper[4877]: I0128 18:11:16.331519 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:11:16 crc kubenswrapper[4877]: E0128 18:11:16.332585 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:11:17 crc kubenswrapper[4877]: I0128 18:11:17.785962 4877 generic.go:334] "Generic (PLEG): container finished" podID="0a51a0f1-93a6-4def-afc0-d5470c6d7358" containerID="fb2e2e8a1257cc26322acde259aa5ac08a9ad2bc83f1ef77db42d47fee485eab" exitCode=0 Jan 28 18:11:17 crc kubenswrapper[4877]: I0128 18:11:17.786169 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4fcp" event={"ID":"0a51a0f1-93a6-4def-afc0-d5470c6d7358","Type":"ContainerDied","Data":"fb2e2e8a1257cc26322acde259aa5ac08a9ad2bc83f1ef77db42d47fee485eab"} Jan 28 18:11:24 crc kubenswrapper[4877]: I0128 18:11:24.602366 4877 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-qn628" podUID="e25ef10a-92ae-45b2-9467-7f15b523a8a1" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:11:25 crc kubenswrapper[4877]: E0128 18:11:25.369738 4877 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-must-gather:latest" Jan 28 18:11:25 crc kubenswrapper[4877]: E0128 18:11:25.375272 4877 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 28 18:11:25 crc kubenswrapper[4877]: container 
&Container{Name:gather,Image:quay.io/openstack-k8s-operators/openstack-must-gather:latest,Command:[/bin/bash -c if command -v setsid >/dev/null 2>&1 && command -v ps >/dev/null 2>&1 && command -v pkill >/dev/null 2>&1; then Jan 28 18:11:25 crc kubenswrapper[4877]: HAVE_SESSION_TOOLS=true Jan 28 18:11:25 crc kubenswrapper[4877]: else Jan 28 18:11:25 crc kubenswrapper[4877]: HAVE_SESSION_TOOLS=false Jan 28 18:11:25 crc kubenswrapper[4877]: fi Jan 28 18:11:25 crc kubenswrapper[4877]: Jan 28 18:11:25 crc kubenswrapper[4877]: Jan 28 18:11:25 crc kubenswrapper[4877]: echo "[disk usage checker] Started" Jan 28 18:11:25 crc kubenswrapper[4877]: target_dir="/must-gather" Jan 28 18:11:25 crc kubenswrapper[4877]: usage_percentage_limit="80" Jan 28 18:11:25 crc kubenswrapper[4877]: while true; do Jan 28 18:11:25 crc kubenswrapper[4877]: usage_percentage=$(df -P "$target_dir" | awk 'NR==2 {print $5}' | sed 's/%//') Jan 28 18:11:25 crc kubenswrapper[4877]: echo "[disk usage checker] Volume usage percentage: current = ${usage_percentage} ; allowed = ${usage_percentage_limit}" Jan 28 18:11:25 crc kubenswrapper[4877]: if [ "$usage_percentage" -gt "$usage_percentage_limit" ]; then Jan 28 18:11:25 crc kubenswrapper[4877]: echo "[disk usage checker] Disk usage exceeds the volume percentage of ${usage_percentage_limit} for mounted directory, terminating..." Jan 28 18:11:25 crc kubenswrapper[4877]: if [ "$HAVE_SESSION_TOOLS" = "true" ]; then Jan 28 18:11:25 crc kubenswrapper[4877]: ps -o sess --no-headers | sort -u | while read sid; do Jan 28 18:11:25 crc kubenswrapper[4877]: [[ "$sid" -eq "${$}" ]] && continue Jan 28 18:11:25 crc kubenswrapper[4877]: pkill --signal SIGKILL --session "$sid" Jan 28 18:11:25 crc kubenswrapper[4877]: done Jan 28 18:11:25 crc kubenswrapper[4877]: else Jan 28 18:11:25 crc kubenswrapper[4877]: kill 0 Jan 28 18:11:25 crc kubenswrapper[4877]: fi Jan 28 18:11:25 crc kubenswrapper[4877]: exit 1 Jan 28 18:11:25 crc kubenswrapper[4877]: fi Jan 28 18:11:25 crc kubenswrapper[4877]: sleep 5 Jan 28 18:11:25 crc kubenswrapper[4877]: done & if [ "$HAVE_SESSION_TOOLS" = "true" ]; then Jan 28 18:11:25 crc kubenswrapper[4877]: setsid -w bash <<-MUSTGATHER_EOF Jan 28 18:11:25 crc kubenswrapper[4877]: ADDITIONAL_NAMESPACES=kuttl,openshift-storage,openshift-marketplace,openshift-operators,sushy-emulator,tobiko OPENSTACK_DATABASES=ALL SOS_EDPM=all OMC=False SOS_DECOMPRESS=0 gather Jan 28 18:11:25 crc kubenswrapper[4877]: MUSTGATHER_EOF Jan 28 18:11:25 crc kubenswrapper[4877]: else Jan 28 18:11:25 crc kubenswrapper[4877]: ADDITIONAL_NAMESPACES=kuttl,openshift-storage,openshift-marketplace,openshift-operators,sushy-emulator,tobiko OPENSTACK_DATABASES=ALL SOS_EDPM=all OMC=False SOS_DECOMPRESS=0 gather Jan 28 18:11:25 crc kubenswrapper[4877]: fi; sync && echo 'Caches written to 
disk'],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:must-gather-output,ReadOnly:false,MountPath:/must-gather,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m954w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod must-gather-hzsz5_openshift-must-gather-8qrtc(b4aebf16-08cb-45fe-8d25-0210aade1749): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled Jan 28 18:11:25 crc kubenswrapper[4877]: > logger="UnhandledError" Jan 28 18:11:25 crc kubenswrapper[4877]: E0128 18:11:25.396365 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"gather\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"copy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-must-gather:latest\\\"\"]" pod="openshift-must-gather-8qrtc/must-gather-hzsz5" podUID="b4aebf16-08cb-45fe-8d25-0210aade1749" Jan 28 18:11:25 crc kubenswrapper[4877]: I0128 18:11:25.904905 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4fcp" event={"ID":"0a51a0f1-93a6-4def-afc0-d5470c6d7358","Type":"ContainerStarted","Data":"b2e89226404b3a5b73c0de3c5a547803752503fa6fe1a473688298fbbfd98252"} Jan 28 18:11:25 crc kubenswrapper[4877]: E0128 18:11:25.906700 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"gather\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-must-gather:latest\\\"\", failed to \"StartContainer\" for \"copy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-must-gather:latest\\\"\"]" pod="openshift-must-gather-8qrtc/must-gather-hzsz5" podUID="b4aebf16-08cb-45fe-8d25-0210aade1749" Jan 28 18:11:25 crc kubenswrapper[4877]: I0128 18:11:25.939003 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-l4fcp" podStartSLOduration=3.069406685 podStartE2EDuration="23.938980448s" podCreationTimestamp="2026-01-28 18:11:02 +0000 UTC" firstStartedPulling="2026-01-28 18:11:04.640300593 +0000 UTC m=+5768.198627481" lastFinishedPulling="2026-01-28 18:11:25.509874356 +0000 UTC m=+5789.068201244" observedRunningTime="2026-01-28 18:11:25.93757254 +0000 UTC m=+5789.495899438" watchObservedRunningTime="2026-01-28 18:11:25.938980448 +0000 UTC m=+5789.497307336" Jan 28 18:11:31 crc 
kubenswrapper[4877]: I0128 18:11:31.331254 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:11:31 crc kubenswrapper[4877]: E0128 18:11:31.332068 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:11:33 crc kubenswrapper[4877]: I0128 18:11:33.276973 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:33 crc kubenswrapper[4877]: I0128 18:11:33.277311 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:34 crc kubenswrapper[4877]: I0128 18:11:34.327952 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-l4fcp" podUID="0a51a0f1-93a6-4def-afc0-d5470c6d7358" containerName="registry-server" probeResult="failure" output=< Jan 28 18:11:34 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:11:34 crc kubenswrapper[4877]: > Jan 28 18:11:41 crc kubenswrapper[4877]: I0128 18:11:41.099644 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8qrtc/must-gather-hzsz5" event={"ID":"b4aebf16-08cb-45fe-8d25-0210aade1749","Type":"ContainerStarted","Data":"658dcd305c0fb1e2cf6c6aed17c1602850d740caee034a1a51ea516be60f848c"} Jan 28 18:11:41 crc kubenswrapper[4877]: I0128 18:11:41.100171 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8qrtc/must-gather-hzsz5" event={"ID":"b4aebf16-08cb-45fe-8d25-0210aade1749","Type":"ContainerStarted","Data":"31019bebdd08fb620026979c0343e4a7bf0001e83cef1b7f2d8b5c84f41689e6"} Jan 28 18:11:41 crc kubenswrapper[4877]: I0128 18:11:41.117181 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-8qrtc/must-gather-hzsz5" podStartSLOduration=3.321380859 podStartE2EDuration="36.11716414s" podCreationTimestamp="2026-01-28 18:11:05 +0000 UTC" firstStartedPulling="2026-01-28 18:11:07.159503315 +0000 UTC m=+5770.717830203" lastFinishedPulling="2026-01-28 18:11:39.955286576 +0000 UTC m=+5803.513613484" observedRunningTime="2026-01-28 18:11:41.113234733 +0000 UTC m=+5804.671561621" watchObservedRunningTime="2026-01-28 18:11:41.11716414 +0000 UTC m=+5804.675491028" Jan 28 18:11:41 crc kubenswrapper[4877]: I0128 18:11:41.810088 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-8qrtc/must-gather-hzsz5"] Jan 28 18:11:41 crc kubenswrapper[4877]: I0128 18:11:41.822365 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-8qrtc/must-gather-hzsz5"] Jan 28 18:11:42 crc kubenswrapper[4877]: I0128 18:11:42.110184 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-8qrtc/must-gather-hzsz5" podUID="b4aebf16-08cb-45fe-8d25-0210aade1749" containerName="gather" containerID="cri-o://31019bebdd08fb620026979c0343e4a7bf0001e83cef1b7f2d8b5c84f41689e6" gracePeriod=2 Jan 28 18:11:42 crc kubenswrapper[4877]: I0128 18:11:42.110218 4877 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openshift-must-gather-8qrtc/must-gather-hzsz5" podUID="b4aebf16-08cb-45fe-8d25-0210aade1749" containerName="copy" containerID="cri-o://658dcd305c0fb1e2cf6c6aed17c1602850d740caee034a1a51ea516be60f848c" gracePeriod=2 Jan 28 18:11:43 crc kubenswrapper[4877]: I0128 18:11:43.136966 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-8qrtc_must-gather-hzsz5_b4aebf16-08cb-45fe-8d25-0210aade1749/copy/0.log" Jan 28 18:11:43 crc kubenswrapper[4877]: I0128 18:11:43.138123 4877 generic.go:334] "Generic (PLEG): container finished" podID="b4aebf16-08cb-45fe-8d25-0210aade1749" containerID="658dcd305c0fb1e2cf6c6aed17c1602850d740caee034a1a51ea516be60f848c" exitCode=143 Jan 28 18:11:44 crc kubenswrapper[4877]: I0128 18:11:44.124437 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:44 crc kubenswrapper[4877]: I0128 18:11:44.201952 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:44 crc kubenswrapper[4877]: I0128 18:11:44.379113 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l4fcp"] Jan 28 18:11:44 crc kubenswrapper[4877]: I0128 18:11:44.715832 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-8qrtc_must-gather-hzsz5_b4aebf16-08cb-45fe-8d25-0210aade1749/copy/0.log" Jan 28 18:11:44 crc kubenswrapper[4877]: I0128 18:11:44.717280 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-8qrtc_must-gather-hzsz5_b4aebf16-08cb-45fe-8d25-0210aade1749/gather/0.log" Jan 28 18:11:44 crc kubenswrapper[4877]: I0128 18:11:44.717354 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8qrtc/must-gather-hzsz5" Jan 28 18:11:44 crc kubenswrapper[4877]: I0128 18:11:44.904567 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b4aebf16-08cb-45fe-8d25-0210aade1749-must-gather-output\") pod \"b4aebf16-08cb-45fe-8d25-0210aade1749\" (UID: \"b4aebf16-08cb-45fe-8d25-0210aade1749\") " Jan 28 18:11:44 crc kubenswrapper[4877]: I0128 18:11:44.904629 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m954w\" (UniqueName: \"kubernetes.io/projected/b4aebf16-08cb-45fe-8d25-0210aade1749-kube-api-access-m954w\") pod \"b4aebf16-08cb-45fe-8d25-0210aade1749\" (UID: \"b4aebf16-08cb-45fe-8d25-0210aade1749\") " Jan 28 18:11:44 crc kubenswrapper[4877]: I0128 18:11:44.905639 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4aebf16-08cb-45fe-8d25-0210aade1749-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "b4aebf16-08cb-45fe-8d25-0210aade1749" (UID: "b4aebf16-08cb-45fe-8d25-0210aade1749"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:11:44 crc kubenswrapper[4877]: I0128 18:11:44.927762 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4aebf16-08cb-45fe-8d25-0210aade1749-kube-api-access-m954w" (OuterVolumeSpecName: "kube-api-access-m954w") pod "b4aebf16-08cb-45fe-8d25-0210aade1749" (UID: "b4aebf16-08cb-45fe-8d25-0210aade1749"). InnerVolumeSpecName "kube-api-access-m954w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:11:45 crc kubenswrapper[4877]: I0128 18:11:45.012395 4877 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b4aebf16-08cb-45fe-8d25-0210aade1749-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 28 18:11:45 crc kubenswrapper[4877]: I0128 18:11:45.012453 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m954w\" (UniqueName: \"kubernetes.io/projected/b4aebf16-08cb-45fe-8d25-0210aade1749-kube-api-access-m954w\") on node \"crc\" DevicePath \"\"" Jan 28 18:11:45 crc kubenswrapper[4877]: I0128 18:11:45.173921 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-8qrtc_must-gather-hzsz5_b4aebf16-08cb-45fe-8d25-0210aade1749/copy/0.log" Jan 28 18:11:45 crc kubenswrapper[4877]: I0128 18:11:45.174409 4877 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-8qrtc_must-gather-hzsz5_b4aebf16-08cb-45fe-8d25-0210aade1749/gather/0.log" Jan 28 18:11:45 crc kubenswrapper[4877]: I0128 18:11:45.174451 4877 generic.go:334] "Generic (PLEG): container finished" podID="b4aebf16-08cb-45fe-8d25-0210aade1749" containerID="31019bebdd08fb620026979c0343e4a7bf0001e83cef1b7f2d8b5c84f41689e6" exitCode=137 Jan 28 18:11:45 crc kubenswrapper[4877]: I0128 18:11:45.174635 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8qrtc/must-gather-hzsz5" Jan 28 18:11:45 crc kubenswrapper[4877]: I0128 18:11:45.174713 4877 scope.go:117] "RemoveContainer" containerID="658dcd305c0fb1e2cf6c6aed17c1602850d740caee034a1a51ea516be60f848c" Jan 28 18:11:45 crc kubenswrapper[4877]: I0128 18:11:45.175102 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-l4fcp" podUID="0a51a0f1-93a6-4def-afc0-d5470c6d7358" containerName="registry-server" containerID="cri-o://b2e89226404b3a5b73c0de3c5a547803752503fa6fe1a473688298fbbfd98252" gracePeriod=2 Jan 28 18:11:45 crc kubenswrapper[4877]: I0128 18:11:45.221457 4877 scope.go:117] "RemoveContainer" containerID="31019bebdd08fb620026979c0343e4a7bf0001e83cef1b7f2d8b5c84f41689e6" Jan 28 18:11:45 crc kubenswrapper[4877]: I0128 18:11:45.273249 4877 scope.go:117] "RemoveContainer" containerID="658dcd305c0fb1e2cf6c6aed17c1602850d740caee034a1a51ea516be60f848c" Jan 28 18:11:45 crc kubenswrapper[4877]: E0128 18:11:45.276123 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"658dcd305c0fb1e2cf6c6aed17c1602850d740caee034a1a51ea516be60f848c\": container with ID starting with 658dcd305c0fb1e2cf6c6aed17c1602850d740caee034a1a51ea516be60f848c not found: ID does not exist" containerID="658dcd305c0fb1e2cf6c6aed17c1602850d740caee034a1a51ea516be60f848c" Jan 28 18:11:45 crc kubenswrapper[4877]: I0128 18:11:45.276199 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"658dcd305c0fb1e2cf6c6aed17c1602850d740caee034a1a51ea516be60f848c"} err="failed to get container status \"658dcd305c0fb1e2cf6c6aed17c1602850d740caee034a1a51ea516be60f848c\": rpc error: code = NotFound desc = could not find container \"658dcd305c0fb1e2cf6c6aed17c1602850d740caee034a1a51ea516be60f848c\": container with ID starting with 658dcd305c0fb1e2cf6c6aed17c1602850d740caee034a1a51ea516be60f848c not found: ID does not exist" Jan 28 18:11:45 crc kubenswrapper[4877]: I0128 18:11:45.276235 4877 
scope.go:117] "RemoveContainer" containerID="31019bebdd08fb620026979c0343e4a7bf0001e83cef1b7f2d8b5c84f41689e6" Jan 28 18:11:45 crc kubenswrapper[4877]: E0128 18:11:45.276667 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31019bebdd08fb620026979c0343e4a7bf0001e83cef1b7f2d8b5c84f41689e6\": container with ID starting with 31019bebdd08fb620026979c0343e4a7bf0001e83cef1b7f2d8b5c84f41689e6 not found: ID does not exist" containerID="31019bebdd08fb620026979c0343e4a7bf0001e83cef1b7f2d8b5c84f41689e6" Jan 28 18:11:45 crc kubenswrapper[4877]: I0128 18:11:45.276724 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31019bebdd08fb620026979c0343e4a7bf0001e83cef1b7f2d8b5c84f41689e6"} err="failed to get container status \"31019bebdd08fb620026979c0343e4a7bf0001e83cef1b7f2d8b5c84f41689e6\": rpc error: code = NotFound desc = could not find container \"31019bebdd08fb620026979c0343e4a7bf0001e83cef1b7f2d8b5c84f41689e6\": container with ID starting with 31019bebdd08fb620026979c0343e4a7bf0001e83cef1b7f2d8b5c84f41689e6 not found: ID does not exist" Jan 28 18:11:45 crc kubenswrapper[4877]: I0128 18:11:45.331619 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:11:45 crc kubenswrapper[4877]: E0128 18:11:45.332294 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:11:45 crc kubenswrapper[4877]: I0128 18:11:45.352190 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4aebf16-08cb-45fe-8d25-0210aade1749" path="/var/lib/kubelet/pods/b4aebf16-08cb-45fe-8d25-0210aade1749/volumes" Jan 28 18:11:46 crc kubenswrapper[4877]: I0128 18:11:46.186601 4877 generic.go:334] "Generic (PLEG): container finished" podID="0a51a0f1-93a6-4def-afc0-d5470c6d7358" containerID="b2e89226404b3a5b73c0de3c5a547803752503fa6fe1a473688298fbbfd98252" exitCode=0 Jan 28 18:11:46 crc kubenswrapper[4877]: I0128 18:11:46.188043 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4fcp" event={"ID":"0a51a0f1-93a6-4def-afc0-d5470c6d7358","Type":"ContainerDied","Data":"b2e89226404b3a5b73c0de3c5a547803752503fa6fe1a473688298fbbfd98252"} Jan 28 18:11:46 crc kubenswrapper[4877]: I0128 18:11:46.188146 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4fcp" event={"ID":"0a51a0f1-93a6-4def-afc0-d5470c6d7358","Type":"ContainerDied","Data":"bc5ea6bbdeaa7ce2a4d97f8a4b0edf58a85be752d6631db21e2838b92104d411"} Jan 28 18:11:46 crc kubenswrapper[4877]: I0128 18:11:46.188319 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc5ea6bbdeaa7ce2a4d97f8a4b0edf58a85be752d6631db21e2838b92104d411" Jan 28 18:11:46 crc kubenswrapper[4877]: I0128 18:11:46.212087 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:46 crc kubenswrapper[4877]: I0128 18:11:46.340861 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a51a0f1-93a6-4def-afc0-d5470c6d7358-catalog-content\") pod \"0a51a0f1-93a6-4def-afc0-d5470c6d7358\" (UID: \"0a51a0f1-93a6-4def-afc0-d5470c6d7358\") " Jan 28 18:11:46 crc kubenswrapper[4877]: I0128 18:11:46.342108 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a51a0f1-93a6-4def-afc0-d5470c6d7358-utilities" (OuterVolumeSpecName: "utilities") pod "0a51a0f1-93a6-4def-afc0-d5470c6d7358" (UID: "0a51a0f1-93a6-4def-afc0-d5470c6d7358"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:11:46 crc kubenswrapper[4877]: I0128 18:11:46.342190 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a51a0f1-93a6-4def-afc0-d5470c6d7358-utilities\") pod \"0a51a0f1-93a6-4def-afc0-d5470c6d7358\" (UID: \"0a51a0f1-93a6-4def-afc0-d5470c6d7358\") " Jan 28 18:11:46 crc kubenswrapper[4877]: I0128 18:11:46.342628 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fm2r\" (UniqueName: \"kubernetes.io/projected/0a51a0f1-93a6-4def-afc0-d5470c6d7358-kube-api-access-2fm2r\") pod \"0a51a0f1-93a6-4def-afc0-d5470c6d7358\" (UID: \"0a51a0f1-93a6-4def-afc0-d5470c6d7358\") " Jan 28 18:11:46 crc kubenswrapper[4877]: I0128 18:11:46.345778 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a51a0f1-93a6-4def-afc0-d5470c6d7358-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:11:46 crc kubenswrapper[4877]: I0128 18:11:46.355053 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a51a0f1-93a6-4def-afc0-d5470c6d7358-kube-api-access-2fm2r" (OuterVolumeSpecName: "kube-api-access-2fm2r") pod "0a51a0f1-93a6-4def-afc0-d5470c6d7358" (UID: "0a51a0f1-93a6-4def-afc0-d5470c6d7358"). InnerVolumeSpecName "kube-api-access-2fm2r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:11:46 crc kubenswrapper[4877]: I0128 18:11:46.401542 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a51a0f1-93a6-4def-afc0-d5470c6d7358-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0a51a0f1-93a6-4def-afc0-d5470c6d7358" (UID: "0a51a0f1-93a6-4def-afc0-d5470c6d7358"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:11:46 crc kubenswrapper[4877]: I0128 18:11:46.447008 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a51a0f1-93a6-4def-afc0-d5470c6d7358-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:11:46 crc kubenswrapper[4877]: I0128 18:11:46.447054 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fm2r\" (UniqueName: \"kubernetes.io/projected/0a51a0f1-93a6-4def-afc0-d5470c6d7358-kube-api-access-2fm2r\") on node \"crc\" DevicePath \"\"" Jan 28 18:11:47 crc kubenswrapper[4877]: I0128 18:11:47.200036 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l4fcp" Jan 28 18:11:47 crc kubenswrapper[4877]: I0128 18:11:47.252250 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l4fcp"] Jan 28 18:11:47 crc kubenswrapper[4877]: I0128 18:11:47.269627 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-l4fcp"] Jan 28 18:11:47 crc kubenswrapper[4877]: I0128 18:11:47.346567 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a51a0f1-93a6-4def-afc0-d5470c6d7358" path="/var/lib/kubelet/pods/0a51a0f1-93a6-4def-afc0-d5470c6d7358/volumes" Jan 28 18:11:59 crc kubenswrapper[4877]: I0128 18:11:59.335242 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:11:59 crc kubenswrapper[4877]: E0128 18:11:59.337851 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:12:14 crc kubenswrapper[4877]: I0128 18:12:14.330620 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:12:14 crc kubenswrapper[4877]: E0128 18:12:14.331423 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:12:26 crc kubenswrapper[4877]: I0128 18:12:26.331053 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:12:26 crc kubenswrapper[4877]: E0128 18:12:26.333367 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:12:40 crc kubenswrapper[4877]: I0128 18:12:40.331168 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:12:40 crc kubenswrapper[4877]: E0128 18:12:40.332898 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:12:51 crc kubenswrapper[4877]: I0128 18:12:51.330929 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 
18:12:51 crc kubenswrapper[4877]: E0128 18:12:51.332038 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:13:05 crc kubenswrapper[4877]: I0128 18:13:05.331559 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:13:05 crc kubenswrapper[4877]: E0128 18:13:05.332265 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:13:20 crc kubenswrapper[4877]: I0128 18:13:20.330712 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:13:20 crc kubenswrapper[4877]: E0128 18:13:20.331730 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:13:35 crc kubenswrapper[4877]: I0128 18:13:35.330761 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:13:35 crc kubenswrapper[4877]: E0128 18:13:35.331746 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:13:48 crc kubenswrapper[4877]: I0128 18:13:48.331509 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:13:48 crc kubenswrapper[4877]: E0128 18:13:48.332495 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.783448 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2svw6"] Jan 28 18:13:56 crc kubenswrapper[4877]: E0128 18:13:56.784613 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a51a0f1-93a6-4def-afc0-d5470c6d7358" containerName="registry-server" Jan 28 
18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.784708 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a51a0f1-93a6-4def-afc0-d5470c6d7358" containerName="registry-server" Jan 28 18:13:56 crc kubenswrapper[4877]: E0128 18:13:56.784749 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a51a0f1-93a6-4def-afc0-d5470c6d7358" containerName="extract-utilities" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.784761 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a51a0f1-93a6-4def-afc0-d5470c6d7358" containerName="extract-utilities" Jan 28 18:13:56 crc kubenswrapper[4877]: E0128 18:13:56.784799 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4aebf16-08cb-45fe-8d25-0210aade1749" containerName="copy" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.784808 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4aebf16-08cb-45fe-8d25-0210aade1749" containerName="copy" Jan 28 18:13:56 crc kubenswrapper[4877]: E0128 18:13:56.784820 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a51a0f1-93a6-4def-afc0-d5470c6d7358" containerName="extract-content" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.784827 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a51a0f1-93a6-4def-afc0-d5470c6d7358" containerName="extract-content" Jan 28 18:13:56 crc kubenswrapper[4877]: E0128 18:13:56.784843 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4aebf16-08cb-45fe-8d25-0210aade1749" containerName="gather" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.784852 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4aebf16-08cb-45fe-8d25-0210aade1749" containerName="gather" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.785127 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a51a0f1-93a6-4def-afc0-d5470c6d7358" containerName="registry-server" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.785161 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4aebf16-08cb-45fe-8d25-0210aade1749" containerName="gather" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.785185 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4aebf16-08cb-45fe-8d25-0210aade1749" containerName="copy" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.787470 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.810659 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2svw6"] Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.862335 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfa54c3b-a704-4bd4-864b-acb1c7c23667-catalog-content\") pod \"redhat-marketplace-2svw6\" (UID: \"cfa54c3b-a704-4bd4-864b-acb1c7c23667\") " pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.862750 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6znwt\" (UniqueName: \"kubernetes.io/projected/cfa54c3b-a704-4bd4-864b-acb1c7c23667-kube-api-access-6znwt\") pod \"redhat-marketplace-2svw6\" (UID: \"cfa54c3b-a704-4bd4-864b-acb1c7c23667\") " pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.862797 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfa54c3b-a704-4bd4-864b-acb1c7c23667-utilities\") pod \"redhat-marketplace-2svw6\" (UID: \"cfa54c3b-a704-4bd4-864b-acb1c7c23667\") " pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.965986 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6znwt\" (UniqueName: \"kubernetes.io/projected/cfa54c3b-a704-4bd4-864b-acb1c7c23667-kube-api-access-6znwt\") pod \"redhat-marketplace-2svw6\" (UID: \"cfa54c3b-a704-4bd4-864b-acb1c7c23667\") " pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.966102 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfa54c3b-a704-4bd4-864b-acb1c7c23667-utilities\") pod \"redhat-marketplace-2svw6\" (UID: \"cfa54c3b-a704-4bd4-864b-acb1c7c23667\") " pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.966611 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfa54c3b-a704-4bd4-864b-acb1c7c23667-utilities\") pod \"redhat-marketplace-2svw6\" (UID: \"cfa54c3b-a704-4bd4-864b-acb1c7c23667\") " pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.966615 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfa54c3b-a704-4bd4-864b-acb1c7c23667-catalog-content\") pod \"redhat-marketplace-2svw6\" (UID: \"cfa54c3b-a704-4bd4-864b-acb1c7c23667\") " pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.967017 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfa54c3b-a704-4bd4-864b-acb1c7c23667-catalog-content\") pod \"redhat-marketplace-2svw6\" (UID: \"cfa54c3b-a704-4bd4-864b-acb1c7c23667\") " pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:13:56 crc kubenswrapper[4877]: I0128 18:13:56.990352 4877 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-6znwt\" (UniqueName: \"kubernetes.io/projected/cfa54c3b-a704-4bd4-864b-acb1c7c23667-kube-api-access-6znwt\") pod \"redhat-marketplace-2svw6\" (UID: \"cfa54c3b-a704-4bd4-864b-acb1c7c23667\") " pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:13:57 crc kubenswrapper[4877]: I0128 18:13:57.122988 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:13:57 crc kubenswrapper[4877]: I0128 18:13:57.638088 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2svw6"] Jan 28 18:13:57 crc kubenswrapper[4877]: I0128 18:13:57.777169 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2svw6" event={"ID":"cfa54c3b-a704-4bd4-864b-acb1c7c23667","Type":"ContainerStarted","Data":"b617d7f2b196ab9adcf5efa8c7062a0b339a5daa1ddc98fc6935a9a6a2fe7d48"} Jan 28 18:13:58 crc kubenswrapper[4877]: I0128 18:13:58.790328 4877 generic.go:334] "Generic (PLEG): container finished" podID="cfa54c3b-a704-4bd4-864b-acb1c7c23667" containerID="4a409c167c96afc9807235a651ef96c9130c0d7ae21f6d8911754e0be97e0128" exitCode=0 Jan 28 18:13:58 crc kubenswrapper[4877]: I0128 18:13:58.790449 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2svw6" event={"ID":"cfa54c3b-a704-4bd4-864b-acb1c7c23667","Type":"ContainerDied","Data":"4a409c167c96afc9807235a651ef96c9130c0d7ae21f6d8911754e0be97e0128"} Jan 28 18:14:00 crc kubenswrapper[4877]: I0128 18:14:00.816146 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2svw6" event={"ID":"cfa54c3b-a704-4bd4-864b-acb1c7c23667","Type":"ContainerStarted","Data":"2c632481612d8fb35c7c186ef57874985f5d39c8a41c1175bd7f056baa73f207"} Jan 28 18:14:01 crc kubenswrapper[4877]: I0128 18:14:01.331678 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:14:01 crc kubenswrapper[4877]: E0128 18:14:01.332331 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:14:01 crc kubenswrapper[4877]: I0128 18:14:01.828239 4877 generic.go:334] "Generic (PLEG): container finished" podID="cfa54c3b-a704-4bd4-864b-acb1c7c23667" containerID="2c632481612d8fb35c7c186ef57874985f5d39c8a41c1175bd7f056baa73f207" exitCode=0 Jan 28 18:14:01 crc kubenswrapper[4877]: I0128 18:14:01.828289 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2svw6" event={"ID":"cfa54c3b-a704-4bd4-864b-acb1c7c23667","Type":"ContainerDied","Data":"2c632481612d8fb35c7c186ef57874985f5d39c8a41c1175bd7f056baa73f207"} Jan 28 18:14:02 crc kubenswrapper[4877]: I0128 18:14:02.858609 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2svw6" event={"ID":"cfa54c3b-a704-4bd4-864b-acb1c7c23667","Type":"ContainerStarted","Data":"83d54431cf3d99c28b8ff71881c1a3bf2b0a2eb568ce77ca0d660690cb6c7884"} Jan 28 18:14:02 crc kubenswrapper[4877]: I0128 18:14:02.906165 4877 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openshift-marketplace/redhat-marketplace-2svw6" podStartSLOduration=3.378001009 podStartE2EDuration="6.906135201s" podCreationTimestamp="2026-01-28 18:13:56 +0000 UTC" firstStartedPulling="2026-01-28 18:13:58.792818658 +0000 UTC m=+5942.351145556" lastFinishedPulling="2026-01-28 18:14:02.32095286 +0000 UTC m=+5945.879279748" observedRunningTime="2026-01-28 18:14:02.888726077 +0000 UTC m=+5946.447052965" watchObservedRunningTime="2026-01-28 18:14:02.906135201 +0000 UTC m=+5946.464462089" Jan 28 18:14:07 crc kubenswrapper[4877]: I0128 18:14:07.123154 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:14:07 crc kubenswrapper[4877]: I0128 18:14:07.125009 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:14:08 crc kubenswrapper[4877]: I0128 18:14:08.184890 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-2svw6" podUID="cfa54c3b-a704-4bd4-864b-acb1c7c23667" containerName="registry-server" probeResult="failure" output=< Jan 28 18:14:08 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:14:08 crc kubenswrapper[4877]: > Jan 28 18:14:14 crc kubenswrapper[4877]: I0128 18:14:14.330611 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:14:14 crc kubenswrapper[4877]: E0128 18:14:14.331566 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:14:18 crc kubenswrapper[4877]: I0128 18:14:18.169972 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-2svw6" podUID="cfa54c3b-a704-4bd4-864b-acb1c7c23667" containerName="registry-server" probeResult="failure" output=< Jan 28 18:14:18 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:14:18 crc kubenswrapper[4877]: > Jan 28 18:14:24 crc kubenswrapper[4877]: I0128 18:14:24.586418 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-v8cp8"] Jan 28 18:14:24 crc kubenswrapper[4877]: I0128 18:14:24.590885 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:24 crc kubenswrapper[4877]: I0128 18:14:24.599115 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-v8cp8"] Jan 28 18:14:24 crc kubenswrapper[4877]: I0128 18:14:24.710209 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-catalog-content\") pod \"certified-operators-v8cp8\" (UID: \"83af3ac1-17bb-4814-9b32-4a2eaa761f6a\") " pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:24 crc kubenswrapper[4877]: I0128 18:14:24.710294 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-utilities\") pod \"certified-operators-v8cp8\" (UID: \"83af3ac1-17bb-4814-9b32-4a2eaa761f6a\") " pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:24 crc kubenswrapper[4877]: I0128 18:14:24.710457 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbchq\" (UniqueName: \"kubernetes.io/projected/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-kube-api-access-bbchq\") pod \"certified-operators-v8cp8\" (UID: \"83af3ac1-17bb-4814-9b32-4a2eaa761f6a\") " pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:24 crc kubenswrapper[4877]: I0128 18:14:24.812271 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbchq\" (UniqueName: \"kubernetes.io/projected/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-kube-api-access-bbchq\") pod \"certified-operators-v8cp8\" (UID: \"83af3ac1-17bb-4814-9b32-4a2eaa761f6a\") " pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:24 crc kubenswrapper[4877]: I0128 18:14:24.812364 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-catalog-content\") pod \"certified-operators-v8cp8\" (UID: \"83af3ac1-17bb-4814-9b32-4a2eaa761f6a\") " pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:24 crc kubenswrapper[4877]: I0128 18:14:24.812431 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-utilities\") pod \"certified-operators-v8cp8\" (UID: \"83af3ac1-17bb-4814-9b32-4a2eaa761f6a\") " pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:24 crc kubenswrapper[4877]: I0128 18:14:24.812991 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-catalog-content\") pod \"certified-operators-v8cp8\" (UID: \"83af3ac1-17bb-4814-9b32-4a2eaa761f6a\") " pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:24 crc kubenswrapper[4877]: I0128 18:14:24.813029 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-utilities\") pod \"certified-operators-v8cp8\" (UID: \"83af3ac1-17bb-4814-9b32-4a2eaa761f6a\") " pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:24 crc kubenswrapper[4877]: I0128 18:14:24.832844 4877 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-bbchq\" (UniqueName: \"kubernetes.io/projected/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-kube-api-access-bbchq\") pod \"certified-operators-v8cp8\" (UID: \"83af3ac1-17bb-4814-9b32-4a2eaa761f6a\") " pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:24 crc kubenswrapper[4877]: I0128 18:14:24.939700 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:25 crc kubenswrapper[4877]: I0128 18:14:25.330945 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:14:25 crc kubenswrapper[4877]: E0128 18:14:25.331781 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:14:25 crc kubenswrapper[4877]: I0128 18:14:25.418894 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-v8cp8"] Jan 28 18:14:26 crc kubenswrapper[4877]: I0128 18:14:26.139123 4877 generic.go:334] "Generic (PLEG): container finished" podID="83af3ac1-17bb-4814-9b32-4a2eaa761f6a" containerID="5c43d8af31b0f75905400c9b9a3538d6b506737755a5e67838dfd353c341a7c9" exitCode=0 Jan 28 18:14:26 crc kubenswrapper[4877]: I0128 18:14:26.139187 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v8cp8" event={"ID":"83af3ac1-17bb-4814-9b32-4a2eaa761f6a","Type":"ContainerDied","Data":"5c43d8af31b0f75905400c9b9a3538d6b506737755a5e67838dfd353c341a7c9"} Jan 28 18:14:26 crc kubenswrapper[4877]: I0128 18:14:26.139227 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v8cp8" event={"ID":"83af3ac1-17bb-4814-9b32-4a2eaa761f6a","Type":"ContainerStarted","Data":"e4a84b52eabf985d6bd407e8c88d92da50e8d091bd8ccbe9c12b0ca19ad59e77"} Jan 28 18:14:27 crc kubenswrapper[4877]: I0128 18:14:27.185579 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:14:27 crc kubenswrapper[4877]: I0128 18:14:27.247916 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:14:28 crc kubenswrapper[4877]: I0128 18:14:28.161444 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v8cp8" event={"ID":"83af3ac1-17bb-4814-9b32-4a2eaa761f6a","Type":"ContainerStarted","Data":"eda96ed6979b63c42d60abe6855ad7d53b9878845234e9ee10167c7bd4f19d2f"} Jan 28 18:14:29 crc kubenswrapper[4877]: I0128 18:14:29.157064 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2svw6"] Jan 28 18:14:29 crc kubenswrapper[4877]: I0128 18:14:29.171817 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2svw6" podUID="cfa54c3b-a704-4bd4-864b-acb1c7c23667" containerName="registry-server" containerID="cri-o://83d54431cf3d99c28b8ff71881c1a3bf2b0a2eb568ce77ca0d660690cb6c7884" gracePeriod=2 Jan 28 18:14:29 crc kubenswrapper[4877]: I0128 18:14:29.746272 
4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:14:29 crc kubenswrapper[4877]: I0128 18:14:29.856622 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfa54c3b-a704-4bd4-864b-acb1c7c23667-catalog-content\") pod \"cfa54c3b-a704-4bd4-864b-acb1c7c23667\" (UID: \"cfa54c3b-a704-4bd4-864b-acb1c7c23667\") " Jan 28 18:14:29 crc kubenswrapper[4877]: I0128 18:14:29.857010 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfa54c3b-a704-4bd4-864b-acb1c7c23667-utilities\") pod \"cfa54c3b-a704-4bd4-864b-acb1c7c23667\" (UID: \"cfa54c3b-a704-4bd4-864b-acb1c7c23667\") " Jan 28 18:14:29 crc kubenswrapper[4877]: I0128 18:14:29.857137 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6znwt\" (UniqueName: \"kubernetes.io/projected/cfa54c3b-a704-4bd4-864b-acb1c7c23667-kube-api-access-6znwt\") pod \"cfa54c3b-a704-4bd4-864b-acb1c7c23667\" (UID: \"cfa54c3b-a704-4bd4-864b-acb1c7c23667\") " Jan 28 18:14:29 crc kubenswrapper[4877]: I0128 18:14:29.857600 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfa54c3b-a704-4bd4-864b-acb1c7c23667-utilities" (OuterVolumeSpecName: "utilities") pod "cfa54c3b-a704-4bd4-864b-acb1c7c23667" (UID: "cfa54c3b-a704-4bd4-864b-acb1c7c23667"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:14:29 crc kubenswrapper[4877]: I0128 18:14:29.858031 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfa54c3b-a704-4bd4-864b-acb1c7c23667-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:14:29 crc kubenswrapper[4877]: I0128 18:14:29.865974 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfa54c3b-a704-4bd4-864b-acb1c7c23667-kube-api-access-6znwt" (OuterVolumeSpecName: "kube-api-access-6znwt") pod "cfa54c3b-a704-4bd4-864b-acb1c7c23667" (UID: "cfa54c3b-a704-4bd4-864b-acb1c7c23667"). InnerVolumeSpecName "kube-api-access-6znwt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:14:29 crc kubenswrapper[4877]: I0128 18:14:29.879309 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfa54c3b-a704-4bd4-864b-acb1c7c23667-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cfa54c3b-a704-4bd4-864b-acb1c7c23667" (UID: "cfa54c3b-a704-4bd4-864b-acb1c7c23667"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:14:29 crc kubenswrapper[4877]: I0128 18:14:29.960923 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfa54c3b-a704-4bd4-864b-acb1c7c23667-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:14:29 crc kubenswrapper[4877]: I0128 18:14:29.960964 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6znwt\" (UniqueName: \"kubernetes.io/projected/cfa54c3b-a704-4bd4-864b-acb1c7c23667-kube-api-access-6znwt\") on node \"crc\" DevicePath \"\"" Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.184930 4877 generic.go:334] "Generic (PLEG): container finished" podID="83af3ac1-17bb-4814-9b32-4a2eaa761f6a" containerID="eda96ed6979b63c42d60abe6855ad7d53b9878845234e9ee10167c7bd4f19d2f" exitCode=0 Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.185002 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v8cp8" event={"ID":"83af3ac1-17bb-4814-9b32-4a2eaa761f6a","Type":"ContainerDied","Data":"eda96ed6979b63c42d60abe6855ad7d53b9878845234e9ee10167c7bd4f19d2f"} Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.193435 4877 generic.go:334] "Generic (PLEG): container finished" podID="cfa54c3b-a704-4bd4-864b-acb1c7c23667" containerID="83d54431cf3d99c28b8ff71881c1a3bf2b0a2eb568ce77ca0d660690cb6c7884" exitCode=0 Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.193508 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2svw6" event={"ID":"cfa54c3b-a704-4bd4-864b-acb1c7c23667","Type":"ContainerDied","Data":"83d54431cf3d99c28b8ff71881c1a3bf2b0a2eb568ce77ca0d660690cb6c7884"} Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.193553 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2svw6" event={"ID":"cfa54c3b-a704-4bd4-864b-acb1c7c23667","Type":"ContainerDied","Data":"b617d7f2b196ab9adcf5efa8c7062a0b339a5daa1ddc98fc6935a9a6a2fe7d48"} Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.193578 4877 scope.go:117] "RemoveContainer" containerID="83d54431cf3d99c28b8ff71881c1a3bf2b0a2eb568ce77ca0d660690cb6c7884" Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.193580 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2svw6" Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.226866 4877 scope.go:117] "RemoveContainer" containerID="2c632481612d8fb35c7c186ef57874985f5d39c8a41c1175bd7f056baa73f207" Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.235371 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2svw6"] Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.251387 4877 scope.go:117] "RemoveContainer" containerID="4a409c167c96afc9807235a651ef96c9130c0d7ae21f6d8911754e0be97e0128" Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.265329 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2svw6"] Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.308947 4877 scope.go:117] "RemoveContainer" containerID="83d54431cf3d99c28b8ff71881c1a3bf2b0a2eb568ce77ca0d660690cb6c7884" Jan 28 18:14:30 crc kubenswrapper[4877]: E0128 18:14:30.309464 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83d54431cf3d99c28b8ff71881c1a3bf2b0a2eb568ce77ca0d660690cb6c7884\": container with ID starting with 83d54431cf3d99c28b8ff71881c1a3bf2b0a2eb568ce77ca0d660690cb6c7884 not found: ID does not exist" containerID="83d54431cf3d99c28b8ff71881c1a3bf2b0a2eb568ce77ca0d660690cb6c7884" Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.309526 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83d54431cf3d99c28b8ff71881c1a3bf2b0a2eb568ce77ca0d660690cb6c7884"} err="failed to get container status \"83d54431cf3d99c28b8ff71881c1a3bf2b0a2eb568ce77ca0d660690cb6c7884\": rpc error: code = NotFound desc = could not find container \"83d54431cf3d99c28b8ff71881c1a3bf2b0a2eb568ce77ca0d660690cb6c7884\": container with ID starting with 83d54431cf3d99c28b8ff71881c1a3bf2b0a2eb568ce77ca0d660690cb6c7884 not found: ID does not exist" Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.309553 4877 scope.go:117] "RemoveContainer" containerID="2c632481612d8fb35c7c186ef57874985f5d39c8a41c1175bd7f056baa73f207" Jan 28 18:14:30 crc kubenswrapper[4877]: E0128 18:14:30.310200 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c632481612d8fb35c7c186ef57874985f5d39c8a41c1175bd7f056baa73f207\": container with ID starting with 2c632481612d8fb35c7c186ef57874985f5d39c8a41c1175bd7f056baa73f207 not found: ID does not exist" containerID="2c632481612d8fb35c7c186ef57874985f5d39c8a41c1175bd7f056baa73f207" Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.310381 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c632481612d8fb35c7c186ef57874985f5d39c8a41c1175bd7f056baa73f207"} err="failed to get container status \"2c632481612d8fb35c7c186ef57874985f5d39c8a41c1175bd7f056baa73f207\": rpc error: code = NotFound desc = could not find container \"2c632481612d8fb35c7c186ef57874985f5d39c8a41c1175bd7f056baa73f207\": container with ID starting with 2c632481612d8fb35c7c186ef57874985f5d39c8a41c1175bd7f056baa73f207 not found: ID does not exist" Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.310512 4877 scope.go:117] "RemoveContainer" containerID="4a409c167c96afc9807235a651ef96c9130c0d7ae21f6d8911754e0be97e0128" Jan 28 18:14:30 crc kubenswrapper[4877]: E0128 18:14:30.310912 4877 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"4a409c167c96afc9807235a651ef96c9130c0d7ae21f6d8911754e0be97e0128\": container with ID starting with 4a409c167c96afc9807235a651ef96c9130c0d7ae21f6d8911754e0be97e0128 not found: ID does not exist" containerID="4a409c167c96afc9807235a651ef96c9130c0d7ae21f6d8911754e0be97e0128" Jan 28 18:14:30 crc kubenswrapper[4877]: I0128 18:14:30.310946 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a409c167c96afc9807235a651ef96c9130c0d7ae21f6d8911754e0be97e0128"} err="failed to get container status \"4a409c167c96afc9807235a651ef96c9130c0d7ae21f6d8911754e0be97e0128\": rpc error: code = NotFound desc = could not find container \"4a409c167c96afc9807235a651ef96c9130c0d7ae21f6d8911754e0be97e0128\": container with ID starting with 4a409c167c96afc9807235a651ef96c9130c0d7ae21f6d8911754e0be97e0128 not found: ID does not exist" Jan 28 18:14:31 crc kubenswrapper[4877]: I0128 18:14:31.207869 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v8cp8" event={"ID":"83af3ac1-17bb-4814-9b32-4a2eaa761f6a","Type":"ContainerStarted","Data":"c92321ba017007a26dfe9f914149ac1ca1e23814aab0ffbd805119619a4473e0"} Jan 28 18:14:31 crc kubenswrapper[4877]: I0128 18:14:31.231317 4877 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-v8cp8" podStartSLOduration=2.718600156 podStartE2EDuration="7.231297811s" podCreationTimestamp="2026-01-28 18:14:24 +0000 UTC" firstStartedPulling="2026-01-28 18:14:26.140982646 +0000 UTC m=+5969.699309534" lastFinishedPulling="2026-01-28 18:14:30.653680301 +0000 UTC m=+5974.212007189" observedRunningTime="2026-01-28 18:14:31.2244243 +0000 UTC m=+5974.782751188" watchObservedRunningTime="2026-01-28 18:14:31.231297811 +0000 UTC m=+5974.789624699" Jan 28 18:14:31 crc kubenswrapper[4877]: I0128 18:14:31.343837 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cfa54c3b-a704-4bd4-864b-acb1c7c23667" path="/var/lib/kubelet/pods/cfa54c3b-a704-4bd4-864b-acb1c7c23667/volumes" Jan 28 18:14:35 crc kubenswrapper[4877]: I0128 18:14:35.203953 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:35 crc kubenswrapper[4877]: I0128 18:14:35.204469 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:36 crc kubenswrapper[4877]: I0128 18:14:36.262061 4877 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-v8cp8" podUID="83af3ac1-17bb-4814-9b32-4a2eaa761f6a" containerName="registry-server" probeResult="failure" output=< Jan 28 18:14:36 crc kubenswrapper[4877]: timeout: failed to connect service ":50051" within 1s Jan 28 18:14:36 crc kubenswrapper[4877]: > Jan 28 18:14:40 crc kubenswrapper[4877]: I0128 18:14:40.330788 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:14:40 crc kubenswrapper[4877]: E0128 18:14:40.331530 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:14:45 crc kubenswrapper[4877]: I0128 18:14:45.042192 4877 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:45 crc kubenswrapper[4877]: I0128 18:14:45.127732 4877 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:45 crc kubenswrapper[4877]: I0128 18:14:45.301507 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-v8cp8"] Jan 28 18:14:46 crc kubenswrapper[4877]: I0128 18:14:46.382226 4877 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-v8cp8" podUID="83af3ac1-17bb-4814-9b32-4a2eaa761f6a" containerName="registry-server" containerID="cri-o://c92321ba017007a26dfe9f914149ac1ca1e23814aab0ffbd805119619a4473e0" gracePeriod=2 Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.394142 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.394266 4877 generic.go:334] "Generic (PLEG): container finished" podID="83af3ac1-17bb-4814-9b32-4a2eaa761f6a" containerID="c92321ba017007a26dfe9f914149ac1ca1e23814aab0ffbd805119619a4473e0" exitCode=0 Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.394293 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v8cp8" event={"ID":"83af3ac1-17bb-4814-9b32-4a2eaa761f6a","Type":"ContainerDied","Data":"c92321ba017007a26dfe9f914149ac1ca1e23814aab0ffbd805119619a4473e0"} Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.394598 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v8cp8" event={"ID":"83af3ac1-17bb-4814-9b32-4a2eaa761f6a","Type":"ContainerDied","Data":"e4a84b52eabf985d6bd407e8c88d92da50e8d091bd8ccbe9c12b0ca19ad59e77"} Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.394626 4877 scope.go:117] "RemoveContainer" containerID="c92321ba017007a26dfe9f914149ac1ca1e23814aab0ffbd805119619a4473e0" Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.423838 4877 scope.go:117] "RemoveContainer" containerID="eda96ed6979b63c42d60abe6855ad7d53b9878845234e9ee10167c7bd4f19d2f" Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.455657 4877 scope.go:117] "RemoveContainer" containerID="5c43d8af31b0f75905400c9b9a3538d6b506737755a5e67838dfd353c341a7c9" Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.511372 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbchq\" (UniqueName: \"kubernetes.io/projected/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-kube-api-access-bbchq\") pod \"83af3ac1-17bb-4814-9b32-4a2eaa761f6a\" (UID: \"83af3ac1-17bb-4814-9b32-4a2eaa761f6a\") " Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.511422 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-catalog-content\") pod \"83af3ac1-17bb-4814-9b32-4a2eaa761f6a\" (UID: \"83af3ac1-17bb-4814-9b32-4a2eaa761f6a\") " Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.511499 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-utilities\") pod \"83af3ac1-17bb-4814-9b32-4a2eaa761f6a\" (UID: \"83af3ac1-17bb-4814-9b32-4a2eaa761f6a\") " Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.512373 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-utilities" (OuterVolumeSpecName: "utilities") pod "83af3ac1-17bb-4814-9b32-4a2eaa761f6a" (UID: "83af3ac1-17bb-4814-9b32-4a2eaa761f6a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.522250 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-kube-api-access-bbchq" (OuterVolumeSpecName: "kube-api-access-bbchq") pod "83af3ac1-17bb-4814-9b32-4a2eaa761f6a" (UID: "83af3ac1-17bb-4814-9b32-4a2eaa761f6a"). InnerVolumeSpecName "kube-api-access-bbchq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.526389 4877 scope.go:117] "RemoveContainer" containerID="c92321ba017007a26dfe9f914149ac1ca1e23814aab0ffbd805119619a4473e0" Jan 28 18:14:47 crc kubenswrapper[4877]: E0128 18:14:47.526830 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c92321ba017007a26dfe9f914149ac1ca1e23814aab0ffbd805119619a4473e0\": container with ID starting with c92321ba017007a26dfe9f914149ac1ca1e23814aab0ffbd805119619a4473e0 not found: ID does not exist" containerID="c92321ba017007a26dfe9f914149ac1ca1e23814aab0ffbd805119619a4473e0" Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.526908 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c92321ba017007a26dfe9f914149ac1ca1e23814aab0ffbd805119619a4473e0"} err="failed to get container status \"c92321ba017007a26dfe9f914149ac1ca1e23814aab0ffbd805119619a4473e0\": rpc error: code = NotFound desc = could not find container \"c92321ba017007a26dfe9f914149ac1ca1e23814aab0ffbd805119619a4473e0\": container with ID starting with c92321ba017007a26dfe9f914149ac1ca1e23814aab0ffbd805119619a4473e0 not found: ID does not exist" Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.526948 4877 scope.go:117] "RemoveContainer" containerID="eda96ed6979b63c42d60abe6855ad7d53b9878845234e9ee10167c7bd4f19d2f" Jan 28 18:14:47 crc kubenswrapper[4877]: E0128 18:14:47.527169 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eda96ed6979b63c42d60abe6855ad7d53b9878845234e9ee10167c7bd4f19d2f\": container with ID starting with eda96ed6979b63c42d60abe6855ad7d53b9878845234e9ee10167c7bd4f19d2f not found: ID does not exist" containerID="eda96ed6979b63c42d60abe6855ad7d53b9878845234e9ee10167c7bd4f19d2f" Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.527214 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eda96ed6979b63c42d60abe6855ad7d53b9878845234e9ee10167c7bd4f19d2f"} err="failed to get container status \"eda96ed6979b63c42d60abe6855ad7d53b9878845234e9ee10167c7bd4f19d2f\": rpc error: code = NotFound desc = could not find container \"eda96ed6979b63c42d60abe6855ad7d53b9878845234e9ee10167c7bd4f19d2f\": container with ID starting with eda96ed6979b63c42d60abe6855ad7d53b9878845234e9ee10167c7bd4f19d2f not found: ID does not 
exist" Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.527227 4877 scope.go:117] "RemoveContainer" containerID="5c43d8af31b0f75905400c9b9a3538d6b506737755a5e67838dfd353c341a7c9" Jan 28 18:14:47 crc kubenswrapper[4877]: E0128 18:14:47.527753 4877 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c43d8af31b0f75905400c9b9a3538d6b506737755a5e67838dfd353c341a7c9\": container with ID starting with 5c43d8af31b0f75905400c9b9a3538d6b506737755a5e67838dfd353c341a7c9 not found: ID does not exist" containerID="5c43d8af31b0f75905400c9b9a3538d6b506737755a5e67838dfd353c341a7c9" Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.527781 4877 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c43d8af31b0f75905400c9b9a3538d6b506737755a5e67838dfd353c341a7c9"} err="failed to get container status \"5c43d8af31b0f75905400c9b9a3538d6b506737755a5e67838dfd353c341a7c9\": rpc error: code = NotFound desc = could not find container \"5c43d8af31b0f75905400c9b9a3538d6b506737755a5e67838dfd353c341a7c9\": container with ID starting with 5c43d8af31b0f75905400c9b9a3538d6b506737755a5e67838dfd353c341a7c9 not found: ID does not exist" Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.578415 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "83af3ac1-17bb-4814-9b32-4a2eaa761f6a" (UID: "83af3ac1-17bb-4814-9b32-4a2eaa761f6a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.614019 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbchq\" (UniqueName: \"kubernetes.io/projected/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-kube-api-access-bbchq\") on node \"crc\" DevicePath \"\"" Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.614062 4877 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:14:47 crc kubenswrapper[4877]: I0128 18:14:47.614072 4877 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83af3ac1-17bb-4814-9b32-4a2eaa761f6a-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:14:48 crc kubenswrapper[4877]: I0128 18:14:48.406801 4877 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-v8cp8" Jan 28 18:14:48 crc kubenswrapper[4877]: I0128 18:14:48.482729 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-v8cp8"] Jan 28 18:14:48 crc kubenswrapper[4877]: I0128 18:14:48.495926 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-v8cp8"] Jan 28 18:14:49 crc kubenswrapper[4877]: I0128 18:14:49.345985 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83af3ac1-17bb-4814-9b32-4a2eaa761f6a" path="/var/lib/kubelet/pods/83af3ac1-17bb-4814-9b32-4a2eaa761f6a/volumes" Jan 28 18:14:54 crc kubenswrapper[4877]: I0128 18:14:54.330771 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b" Jan 28 18:14:54 crc kubenswrapper[4877]: E0128 18:14:54.331440 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.185710 4877 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk"] Jan 28 18:15:00 crc kubenswrapper[4877]: E0128 18:15:00.186847 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83af3ac1-17bb-4814-9b32-4a2eaa761f6a" containerName="extract-content" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.186866 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="83af3ac1-17bb-4814-9b32-4a2eaa761f6a" containerName="extract-content" Jan 28 18:15:00 crc kubenswrapper[4877]: E0128 18:15:00.186900 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfa54c3b-a704-4bd4-864b-acb1c7c23667" containerName="registry-server" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.186908 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfa54c3b-a704-4bd4-864b-acb1c7c23667" containerName="registry-server" Jan 28 18:15:00 crc kubenswrapper[4877]: E0128 18:15:00.186935 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfa54c3b-a704-4bd4-864b-acb1c7c23667" containerName="extract-content" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.186944 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfa54c3b-a704-4bd4-864b-acb1c7c23667" containerName="extract-content" Jan 28 18:15:00 crc kubenswrapper[4877]: E0128 18:15:00.186968 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83af3ac1-17bb-4814-9b32-4a2eaa761f6a" containerName="registry-server" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.186975 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="83af3ac1-17bb-4814-9b32-4a2eaa761f6a" containerName="registry-server" Jan 28 18:15:00 crc kubenswrapper[4877]: E0128 18:15:00.186991 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83af3ac1-17bb-4814-9b32-4a2eaa761f6a" containerName="extract-utilities" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.186998 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="83af3ac1-17bb-4814-9b32-4a2eaa761f6a" containerName="extract-utilities" Jan 28 18:15:00 crc 
kubenswrapper[4877]: E0128 18:15:00.187025 4877 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfa54c3b-a704-4bd4-864b-acb1c7c23667" containerName="extract-utilities" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.187033 4877 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfa54c3b-a704-4bd4-864b-acb1c7c23667" containerName="extract-utilities" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.187334 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfa54c3b-a704-4bd4-864b-acb1c7c23667" containerName="registry-server" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.187359 4877 memory_manager.go:354] "RemoveStaleState removing state" podUID="83af3ac1-17bb-4814-9b32-4a2eaa761f6a" containerName="registry-server" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.188448 4877 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.196627 4877 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.200933 4877 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.232501 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk"] Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.263292 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72636388-ae35-419c-aead-15e2265e3aa1-config-volume\") pod \"collect-profiles-29493735-qfxkk\" (UID: \"72636388-ae35-419c-aead-15e2265e3aa1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.263605 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76jj7\" (UniqueName: \"kubernetes.io/projected/72636388-ae35-419c-aead-15e2265e3aa1-kube-api-access-76jj7\") pod \"collect-profiles-29493735-qfxkk\" (UID: \"72636388-ae35-419c-aead-15e2265e3aa1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.263683 4877 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72636388-ae35-419c-aead-15e2265e3aa1-secret-volume\") pod \"collect-profiles-29493735-qfxkk\" (UID: \"72636388-ae35-419c-aead-15e2265e3aa1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.366573 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72636388-ae35-419c-aead-15e2265e3aa1-config-volume\") pod \"collect-profiles-29493735-qfxkk\" (UID: \"72636388-ae35-419c-aead-15e2265e3aa1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.366746 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76jj7\" (UniqueName: 
\"kubernetes.io/projected/72636388-ae35-419c-aead-15e2265e3aa1-kube-api-access-76jj7\") pod \"collect-profiles-29493735-qfxkk\" (UID: \"72636388-ae35-419c-aead-15e2265e3aa1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.366791 4877 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72636388-ae35-419c-aead-15e2265e3aa1-secret-volume\") pod \"collect-profiles-29493735-qfxkk\" (UID: \"72636388-ae35-419c-aead-15e2265e3aa1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.367635 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72636388-ae35-419c-aead-15e2265e3aa1-config-volume\") pod \"collect-profiles-29493735-qfxkk\" (UID: \"72636388-ae35-419c-aead-15e2265e3aa1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.373363 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72636388-ae35-419c-aead-15e2265e3aa1-secret-volume\") pod \"collect-profiles-29493735-qfxkk\" (UID: \"72636388-ae35-419c-aead-15e2265e3aa1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.395098 4877 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76jj7\" (UniqueName: \"kubernetes.io/projected/72636388-ae35-419c-aead-15e2265e3aa1-kube-api-access-76jj7\") pod \"collect-profiles-29493735-qfxkk\" (UID: \"72636388-ae35-419c-aead-15e2265e3aa1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk" Jan 28 18:15:00 crc kubenswrapper[4877]: I0128 18:15:00.512573 4877 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk" Jan 28 18:15:01 crc kubenswrapper[4877]: I0128 18:15:01.029164 4877 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk"] Jan 28 18:15:01 crc kubenswrapper[4877]: I0128 18:15:01.577388 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk" event={"ID":"72636388-ae35-419c-aead-15e2265e3aa1","Type":"ContainerStarted","Data":"8e7988500e58aeeb2664fd4ea0681faef3146c31c8d3cd984d2e508dfa4a1928"} Jan 28 18:15:01 crc kubenswrapper[4877]: I0128 18:15:01.577648 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk" event={"ID":"72636388-ae35-419c-aead-15e2265e3aa1","Type":"ContainerStarted","Data":"97d7ada0df5a89d28a9b2f7c2fc191a89afb0cd6c2255dc382bd46196b34bbbf"} Jan 28 18:15:02 crc kubenswrapper[4877]: I0128 18:15:02.590352 4877 generic.go:334] "Generic (PLEG): container finished" podID="72636388-ae35-419c-aead-15e2265e3aa1" containerID="8e7988500e58aeeb2664fd4ea0681faef3146c31c8d3cd984d2e508dfa4a1928" exitCode=0 Jan 28 18:15:02 crc kubenswrapper[4877]: I0128 18:15:02.590461 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk" event={"ID":"72636388-ae35-419c-aead-15e2265e3aa1","Type":"ContainerDied","Data":"8e7988500e58aeeb2664fd4ea0681faef3146c31c8d3cd984d2e508dfa4a1928"} Jan 28 18:15:04 crc kubenswrapper[4877]: I0128 18:15:04.083457 4877 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk" Jan 28 18:15:04 crc kubenswrapper[4877]: I0128 18:15:04.181446 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72636388-ae35-419c-aead-15e2265e3aa1-config-volume\") pod \"72636388-ae35-419c-aead-15e2265e3aa1\" (UID: \"72636388-ae35-419c-aead-15e2265e3aa1\") " Jan 28 18:15:04 crc kubenswrapper[4877]: I0128 18:15:04.181684 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72636388-ae35-419c-aead-15e2265e3aa1-secret-volume\") pod \"72636388-ae35-419c-aead-15e2265e3aa1\" (UID: \"72636388-ae35-419c-aead-15e2265e3aa1\") " Jan 28 18:15:04 crc kubenswrapper[4877]: I0128 18:15:04.181742 4877 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76jj7\" (UniqueName: \"kubernetes.io/projected/72636388-ae35-419c-aead-15e2265e3aa1-kube-api-access-76jj7\") pod \"72636388-ae35-419c-aead-15e2265e3aa1\" (UID: \"72636388-ae35-419c-aead-15e2265e3aa1\") " Jan 28 18:15:04 crc kubenswrapper[4877]: I0128 18:15:04.186420 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72636388-ae35-419c-aead-15e2265e3aa1-config-volume" (OuterVolumeSpecName: "config-volume") pod "72636388-ae35-419c-aead-15e2265e3aa1" (UID: "72636388-ae35-419c-aead-15e2265e3aa1"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:15:04 crc kubenswrapper[4877]: I0128 18:15:04.190054 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72636388-ae35-419c-aead-15e2265e3aa1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "72636388-ae35-419c-aead-15e2265e3aa1" (UID: "72636388-ae35-419c-aead-15e2265e3aa1"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:15:04 crc kubenswrapper[4877]: I0128 18:15:04.194107 4877 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72636388-ae35-419c-aead-15e2265e3aa1-kube-api-access-76jj7" (OuterVolumeSpecName: "kube-api-access-76jj7") pod "72636388-ae35-419c-aead-15e2265e3aa1" (UID: "72636388-ae35-419c-aead-15e2265e3aa1"). InnerVolumeSpecName "kube-api-access-76jj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:15:04 crc kubenswrapper[4877]: I0128 18:15:04.285614 4877 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/72636388-ae35-419c-aead-15e2265e3aa1-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 18:15:04 crc kubenswrapper[4877]: I0128 18:15:04.285989 4877 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76jj7\" (UniqueName: \"kubernetes.io/projected/72636388-ae35-419c-aead-15e2265e3aa1-kube-api-access-76jj7\") on node \"crc\" DevicePath \"\"" Jan 28 18:15:04 crc kubenswrapper[4877]: I0128 18:15:04.286004 4877 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/72636388-ae35-419c-aead-15e2265e3aa1-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 18:15:04 crc kubenswrapper[4877]: I0128 18:15:04.620943 4877 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493735-qfxkk" event={"ID":"72636388-ae35-419c-aead-15e2265e3aa1","Type":"ContainerDied","Data":"97d7ada0df5a89d28a9b2f7c2fc191a89afb0cd6c2255dc382bd46196b34bbbf"} Jan 28 18:15:04 crc kubenswrapper[4877]: I0128 18:15:04.620984 4877 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97d7ada0df5a89d28a9b2f7c2fc191a89afb0cd6c2255dc382bd46196b34bbbf" Jan 28 18:15:04 crc kubenswrapper[4877]: I0128 18:15:04.621013 4877 util.go:48] "No ready sandbox for pod can be found. 
Jan 28 18:15:04 crc kubenswrapper[4877]: I0128 18:15:04.705887 4877 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd"]
Jan 28 18:15:04 crc kubenswrapper[4877]: I0128 18:15:04.719646 4877 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493690-9gxxd"]
Jan 28 18:15:05 crc kubenswrapper[4877]: I0128 18:15:05.331676 4877 scope.go:117] "RemoveContainer" containerID="b1c4effb1f040c018c9aa2895646fd94a7f033996b2c66b6b55f1c25566aa97b"
Jan 28 18:15:05 crc kubenswrapper[4877]: E0128 18:15:05.333027 4877 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6xsrm_openshift-machine-config-operator(95a2e787-3c51-42f8-b6fc-46b7c39ed39d)\"" pod="openshift-machine-config-operator/machine-config-daemon-6xsrm" podUID="95a2e787-3c51-42f8-b6fc-46b7c39ed39d"
Jan 28 18:15:05 crc kubenswrapper[4877]: I0128 18:15:05.346008 4877 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d" path="/var/lib/kubelet/pods/1ec7a03e-8732-4dfe-90c2-4b9cd48d4c7d/volumes"